From a1f7fa36cf86125b171831182c756676703a80dd Mon Sep 17 00:00:00 2001
From: Ian Wahbe
Date: Mon, 16 Sep 2024 15:20:28 +0200
Subject: [PATCH] Upgrade terraform-provider-google-beta to v6.2.0 (#2383)

This PR was generated via `$ upgrade-provider pulumi/pulumi-gcp`.

---

- Upgrading terraform-provider-google-beta from 6.0.1 to 6.2.0.

Fixes #2375
Fixes #2363
---
 .pulumi-java-gen.version | 2 +- patches/0003-rebase-bigquery_dataset.patch | 2 +- patches/0004-website-docs-d-tweaks.patch | 2 +- patches/0005-docs-patching.patch | 58 +- ...default-labels-for-Import-and-Create.patch | 4 +- ...rovisioning-label-to-goog-pulumi-pro.patch | 4 +- .../pulumi-resource-gcp/bridge-metadata.json | 289 +- provider/cmd/pulumi-resource-gcp/schema.json | 2744 +++++++++++++++-- provider/go.mod | 54 +- provider/go.sum | 108 +- provider/resources.go | 2 +- sdk/dotnet/Alloydb/Cluster.cs | 44 + .../Inputs/ClusterTrialMetadataArgs.cs | 44 + .../Inputs/ClusterTrialMetadataGetArgs.cs | 44 + .../Alloydb/Outputs/ClusterTrialMetadata.cs | 49 + sdk/dotnet/AssuredWorkloads/Workload.cs | 82 +- .../BackupDisasterRecovery/BackupVault.cs | 593 ++++ sdk/dotnet/BigQuery/DataTransferConfig.cs | 97 +- ...ansferConfigEncryptionConfigurationArgs.cs | 26 + ...ferConfigEncryptionConfigurationGetArgs.cs | 26 + ...taTransferConfigEncryptionConfiguration.cs | 27 + .../BigQueryAnalyticsHub/DataExchange.cs | 48 + ...ataExchangeSharingEnvironmentConfigArgs.cs | 32 + ...gEnvironmentConfigDcrExchangeConfigArgs.cs | 20 + ...vironmentConfigDcrExchangeConfigGetArgs.cs | 20 + ...ironmentConfigDefaultExchangeConfigArgs.cs | 20 + ...nmentConfigDefaultExchangeConfigGetArgs.cs | 20 + ...ExchangeSharingEnvironmentConfigGetArgs.cs | 32 + .../Inputs/ListingBigqueryDatasetArgs.cs | 15 +- .../Inputs/ListingBigqueryDatasetGetArgs.cs | 15 +- ...tingBigqueryDatasetSelectedResourceArgs.cs | 28 + ...gBigqueryDatasetSelectedResourceGetArgs.cs | 28 + .../ListingRestrictedExportConfigArgs.cs | 7 + .../ListingRestrictedExportConfigGetArgs.cs | 7 + sdk/dotnet/BigQueryAnalyticsHub/Listing.cs | 81 + .../DataExchangeSharingEnvironmentConfig.cs | 35 + ...aringEnvironmentConfigDcrExchangeConfig.cs | 21 + ...gEnvironmentConfigDefaultExchangeConfig.cs | 21 + .../Outputs/ListingBigqueryDataset.cs | 13 +- .../ListingBigqueryDatasetSelectedResource.cs | 29 + .../Outputs/ListingRestrictedExportConfig.cs | 8 + .../BigTable/Inputs/TableColumnFamilyArgs.cs | 6 + .../Inputs/TableColumnFamilyGetArgs.cs | 6 + .../BigTable/Outputs/TableColumnFamily.cs | 10 +- sdk/dotnet/BigTable/Table.cs | 18 + sdk/dotnet/CertificateAuthority/Authority.cs | 9 +- sdk/dotnet/CertificateManager/Certificate.cs | 18 + .../CertificateManager/GetCertificates.cs | 162 + ...teManagedAuthorizationAttemptInfoResult.cs | 51 + ...rtificateManagedProvisioningIssueResult.cs | 37 + ...GetCertificatesCertificateManagedResult.cs | 67 + .../GetCertificatesCertificateResult.cs | 106 + .../Inputs/WorkerPoolWorkerConfigArgs.cs | 4 +- .../Inputs/WorkerPoolWorkerConfigGetArgs.cs | 4 +- .../Outputs/WorkerPoolWorkerConfig.cs | 4 +- .../Inputs/ServiceTemplateSpecVolumeArgs.cs | 3 +- .../ServiceTemplateSpecVolumeCsiArgs.cs | 3 +- .../ServiceTemplateSpecVolumeCsiGetArgs.cs | 3 +- .../ServiceTemplateSpecVolumeGetArgs.cs | 3 +- .../GetServiceTemplateSpecVolumeCsiResult.cs | 3 +- .../GetServiceTemplateSpecVolumeResult.cs | 3 +- .../Outputs/ServiceTemplateSpecVolume.cs | 3 +- .../Outputs/ServiceTemplateSpecVolumeCsi.cs | 3 +- .../Inputs/JobTemplateTemplateVolumeArgs.cs | 4 +- .../JobTemplateTemplateVolumeGetArgs.cs | 4 +-
.../CloudRunV2/Inputs/ServiceTemplateArgs.cs | 7 + .../Inputs/ServiceTemplateGetArgs.cs | 7 + .../Inputs/ServiceTemplateServiceMeshArgs.cs | 28 + .../ServiceTemplateServiceMeshGetArgs.cs | 28 + .../Inputs/ServiceTemplateVolumeArgs.cs | 2 +- .../Inputs/ServiceTemplateVolumeGetArgs.cs | 2 +- .../Inputs/ServiceTemplateVolumeNfsArgs.cs | 2 - .../Inputs/ServiceTemplateVolumeNfsGetArgs.cs | 2 - .../GetJobTemplateTemplateVolumeResult.cs | 4 +- .../Outputs/GetServiceTemplateResult.cs | 7 + .../GetServiceTemplateServiceMeshResult.cs | 27 + .../Outputs/GetServiceTemplateVolumeResult.cs | 2 +- .../Outputs/JobTemplateTemplateVolume.cs | 4 +- .../CloudRunV2/Outputs/ServiceTemplate.cs | 8 + .../Outputs/ServiceTemplateServiceMesh.cs | 29 + .../Outputs/ServiceTemplateVolume.cs | 2 +- .../Outputs/ServiceTemplateVolumeNfs.cs | 2 - sdk/dotnet/CloudRunV2/Service.cs | 59 +- .../CloudTasks/Inputs/QueueHttpTargetArgs.cs | 74 + .../Inputs/QueueHttpTargetGetArgs.cs | 74 + .../QueueHttpTargetHeaderOverrideArgs.cs | 27 + .../QueueHttpTargetHeaderOverrideGetArgs.cs | 27 + ...QueueHttpTargetHeaderOverrideHeaderArgs.cs | 32 + ...ueHttpTargetHeaderOverrideHeaderGetArgs.cs | 32 + .../Inputs/QueueHttpTargetOauthTokenArgs.cs | 35 + .../QueueHttpTargetOauthTokenGetArgs.cs | 35 + .../Inputs/QueueHttpTargetOidcTokenArgs.cs | 34 + .../Inputs/QueueHttpTargetOidcTokenGetArgs.cs | 34 + .../Inputs/QueueHttpTargetUriOverrideArgs.cs | 73 + .../QueueHttpTargetUriOverrideGetArgs.cs | 73 + ...ueHttpTargetUriOverridePathOverrideArgs.cs | 26 + ...ttpTargetUriOverridePathOverrideGetArgs.cs | 26 + ...eHttpTargetUriOverrideQueryOverrideArgs.cs | 26 + ...tpTargetUriOverrideQueryOverrideGetArgs.cs | 26 + .../CloudTasks/Outputs/QueueHttpTarget.cs | 74 + .../Outputs/QueueHttpTargetHeaderOverride.cs | 28 + .../QueueHttpTargetHeaderOverrideHeader.cs | 35 + .../Outputs/QueueHttpTargetOauthToken.cs | 38 + .../Outputs/QueueHttpTargetOidcToken.cs | 37 + .../Outputs/QueueHttpTargetUriOverride.cs | 80 + .../QueueHttpTargetUriOverridePathOverride.cs | 27 + ...QueueHttpTargetUriOverrideQueryOverride.cs | 27 + sdk/dotnet/CloudTasks/Queue.cs | 155 + sdk/dotnet/Compute/GetInstance.cs | 2 +- sdk/dotnet/Compute/HealthCheck.cs | 87 + .../Compute/Inputs/FirewallAllowArgs.cs | 2 +- .../Compute/Inputs/FirewallAllowGetArgs.cs | 2 +- sdk/dotnet/Compute/Inputs/FirewallDenyArgs.cs | 2 +- .../Compute/Inputs/FirewallDenyGetArgs.cs | 2 +- .../Compute/Inputs/InstanceBootDiskArgs.cs | 6 + .../Compute/Inputs/InstanceBootDiskGetArgs.cs | 6 + .../InstanceFromMachineImageBootDiskArgs.cs | 6 + ...InstanceFromMachineImageBootDiskGetArgs.cs | 6 + .../InstanceFromTemplateBootDiskArgs.cs | 6 + .../InstanceFromTemplateBootDiskGetArgs.cs | 6 + .../Inputs/InstanceNetworkInterfaceArgs.cs | 2 +- .../Inputs/InstanceNetworkInterfaceGetArgs.cs | 2 +- .../Inputs/NodeTemplateAcceleratorArgs.cs | 34 + .../Inputs/NodeTemplateAcceleratorGetArgs.cs | 34 + sdk/dotnet/Compute/Instance.cs | 75 +- sdk/dotnet/Compute/InstanceTemplate.cs | 60 + sdk/dotnet/Compute/Interconnect.cs | 21 +- sdk/dotnet/Compute/NodeTemplate.cs | 68 + sdk/dotnet/Compute/Outputs/FirewallAllow.cs | 2 +- sdk/dotnet/Compute/Outputs/FirewallDeny.cs | 2 +- .../Outputs/GetInstanceBootDiskResult.cs | 7 + .../Compute/Outputs/InstanceBootDisk.cs | 7 + .../InstanceFromMachineImageBootDisk.cs | 7 + .../Outputs/InstanceFromTemplateBootDisk.cs | 7 + .../Outputs/InstanceNetworkInterface.cs | 2 +- .../Outputs/NodeTemplateAccelerator.cs | 37 + sdk/dotnet/Compute/TargetHttpsProxy.cs | 12 + sdk/dotnet/Container/AttachedCluster.cs | 6 +- 
.../ClusterNodeConfigKubeletConfigArgs.cs | 6 + .../ClusterNodeConfigKubeletConfigGetArgs.cs | 6 + .../Inputs/ClusterNodePoolAutoConfigArgs.cs | 9 +- .../ClusterNodePoolAutoConfigGetArgs.cs | 9 +- ...NodePoolAutoConfigNodeKubeletConfigArgs.cs | 26 + ...ePoolAutoConfigNodeKubeletConfigGetArgs.cs | 26 + ...rNodePoolDefaultsNodeConfigDefaultsArgs.cs | 6 + ...dePoolDefaultsNodeConfigDefaultsGetArgs.cs | 6 + ...sterNodePoolNodeConfigKubeletConfigArgs.cs | 6 + ...rNodePoolNodeConfigKubeletConfigGetArgs.cs | 6 + .../NodePoolNodeConfigKubeletConfigArgs.cs | 6 + .../NodePoolNodeConfigKubeletConfigGetArgs.cs | 6 + .../Outputs/ClusterNodeConfigKubeletConfig.cs | 7 + .../Outputs/ClusterNodePoolAutoConfig.cs | 10 +- ...sterNodePoolAutoConfigNodeKubeletConfig.cs | 27 + ...usterNodePoolDefaultsNodeConfigDefaults.cs | 7 + .../ClusterNodePoolNodeConfigKubeletConfig.cs | 7 + ...GetClusterNodeConfigKubeletConfigResult.cs | 7 + ...dePoolAutoConfigNodeKubeletConfigResult.cs | 27 + .../GetClusterNodePoolAutoConfigResult.cs | 7 + ...rNodePoolDefaultNodeConfigDefaultResult.cs | 7 + ...erNodePoolNodeConfigKubeletConfigResult.cs | 7 + .../NodePoolNodeConfigKubeletConfig.cs | 7 + .../PreventionDiscoveryConfigActionArgs.cs | 7 + .../PreventionDiscoveryConfigActionGetArgs.cs | 7 + ...onDiscoveryConfigActionTagResourcesArgs.cs | 52 + ...iscoveryConfigActionTagResourcesGetArgs.cs | 52 + ...onfigActionTagResourcesTagConditionArgs.cs | 34 + ...igActionTagResourcesTagConditionGetArgs.cs | 34 + ...sourcesTagConditionSensitivityScoreArgs.cs | 27 + ...rcesTagConditionSensitivityScoreGetArgs.cs | 27 + ...igActionTagResourcesTagConditionTagArgs.cs | 26 + ...ctionTagResourcesTagConditionTagGetArgs.cs | 26 + ...ryConfigTargetBigQueryTargetCadenceArgs.cs | 7 + ...onfigTargetBigQueryTargetCadenceGetArgs.cs | 7 + ...denceInspectTemplateModifiedCadenceArgs.cs | 27 + ...ceInspectTemplateModifiedCadenceGetArgs.cs | 27 + ...rgetCloudSqlTargetGenerationCadenceArgs.cs | 7 + ...tCloudSqlTargetGenerationCadenceGetArgs.cs | 7 + ...denceInspectTemplateModifiedCadenceArgs.cs | 27 + ...ceInspectTemplateModifiedCadenceGetArgs.cs | 27 + .../PreventionDiscoveryConfigAction.cs | 10 +- ...entionDiscoveryConfigActionTagResources.cs | 44 + ...eryConfigActionTagResourcesTagCondition.cs | 37 + ...agResourcesTagConditionSensitivityScore.cs | 28 + ...ConfigActionTagResourcesTagConditionTag.cs | 27 + ...coveryConfigTargetBigQueryTargetCadence.cs | 8 + ...etCadenceInspectTemplateModifiedCadence.cs | 28 + ...igTargetCloudSqlTargetGenerationCadence.cs | 8 + ...onCadenceInspectTemplateModifiedCadence.cs | 28 + .../ConnectionProfile.cs | 183 ++ .../Inputs/ConnectionProfileMysqlArgs.cs | 22 +- .../Inputs/ConnectionProfileMysqlGetArgs.cs | 22 +- .../Inputs/ConnectionProfilePostgresqlArgs.cs | 28 +- .../ConnectionProfilePostgresqlGetArgs.cs | 28 +- .../Outputs/ConnectionProfileMysql.cs | 24 +- .../Outputs/ConnectionProfilePostgresql.cs | 31 +- ...mplatePlacementManagedClusterConfigArgs.cs | 2 +- ...anagedClusterConfigGceClusterConfigArgs.cs | 4 +- ...gedClusterConfigGceClusterConfigGetArgs.cs | 4 +- ...atePlacementManagedClusterConfigGetArgs.cs | 2 +- ...edClusterConfigInitializationActionArgs.cs | 2 +- ...lusterConfigInitializationActionGetArgs.cs | 2 +- ...ManagedClusterConfigLifecycleConfigArgs.cs | 8 +- ...agedClusterConfigLifecycleConfigGetArgs.cs | 8 +- ...entManagedClusterConfigMasterConfigArgs.cs | 2 +- ...ManagedClusterConfigMasterConfigGetArgs.cs | 2 +- ...owTemplatePlacementManagedClusterConfig.cs | 2 +- ...entManagedClusterConfigGceClusterConfig.cs | 4 
+- ...anagedClusterConfigInitializationAction.cs | 2 +- ...mentManagedClusterConfigLifecycleConfig.cs | 8 +- ...acementManagedClusterConfigMasterConfig.cs | 2 +- ...amSourceConfigSqlServerSourceConfigArgs.cs | 12 + ...igSqlServerSourceConfigChangeTablesArgs.cs | 20 + ...qlServerSourceConfigChangeTablesGetArgs.cs | 20 + ...ourceConfigSqlServerSourceConfigGetArgs.cs | 12 + ...qlServerSourceConfigTransactionLogsArgs.cs | 20 + ...erverSourceConfigTransactionLogsGetArgs.cs | 20 + ...StreamSourceConfigSqlServerSourceConfig.cs | 16 +- ...ConfigSqlServerSourceConfigChangeTables.cs | 21 + ...figSqlServerSourceConfigTransactionLogs.cs | 21 + sdk/dotnet/Datastream/Stream.cs | 151 +- sdk/dotnet/DiscoveryEngine/DataStore.cs | 12 +- .../DataStoreDocumentProcessingConfigArgs.cs | 7 + ...umentProcessingConfigChunkingConfigArgs.cs | 27 + ...ntProcessingConfigChunkingConfigGetArgs.cs | 27 + ...kingConfigLayoutBasedChunkingConfigArgs.cs | 34 + ...gConfigLayoutBasedChunkingConfigGetArgs.cs | 34 + ...rocessingConfigDefaultParsingConfigArgs.cs | 6 + ...essingConfigDefaultParsingConfigGetArgs.cs | 6 + ...ultParsingConfigLayoutParsingConfigArgs.cs | 20 + ...ParsingConfigLayoutParsingConfigGetArgs.cs | 20 + ...ataStoreDocumentProcessingConfigGetArgs.cs | 7 + ...ocessingConfigParsingConfigOverrideArgs.cs | 6 + ...ssingConfigParsingConfigOverrideGetArgs.cs | 6 + ...ngConfigOverrideLayoutParsingConfigArgs.cs | 20 + ...onfigOverrideLayoutParsingConfigGetArgs.cs | 20 + .../DataStoreDocumentProcessingConfig.cs | 8 + ...eDocumentProcessingConfigChunkingConfig.cs | 28 + ...ChunkingConfigLayoutBasedChunkingConfig.cs | 37 + ...entProcessingConfigDefaultParsingConfig.cs | 7 + ...DefaultParsingConfigLayoutParsingConfig.cs | 21 + ...ntProcessingConfigParsingConfigOverride.cs | 7 + ...arsingConfigOverrideLayoutParsingConfig.cs | 21 + sdk/dotnet/Firebase/DatabaseInstance.cs | 6 +- sdk/dotnet/GkeHub/FeatureMembership.cs | 9 +- .../FeatureMembershipConfigmanagementArgs.cs | 8 + ...eatureMembershipConfigmanagementGetArgs.cs | 8 + .../FeatureMembershipConfigmanagement.cs | 8 + .../Iam/GetWorkloadIdentityPoolProvider.cs | 6 +- .../WorkloadIdentityPoolProviderSamlArgs.cs | 2 + ...WorkloadIdentityPoolProviderSamlGetArgs.cs | 2 + .../WorkloadIdentityPoolProviderX509Args.cs | 30 + ...WorkloadIdentityPoolProviderX509GetArgs.cs | 30 + ...dIdentityPoolProviderX509TrustStoreArgs.cs | 50 + ...entityPoolProviderX509TrustStoreGetArgs.cs | 50 + ...roviderX509TrustStoreIntermediateCaArgs.cs | 27 + ...iderX509TrustStoreIntermediateCaGetArgs.cs | 27 + ...olProviderX509TrustStoreTrustAnchorArgs.cs | 27 + ...roviderX509TrustStoreTrustAnchorGetArgs.cs | 27 + ...tWorkloadIdentityPoolProviderX509Result.cs | 31 + ...viderX509TrustStoreIntermediateCaResult.cs | 28 + ...dentityPoolProviderX509TrustStoreResult.cs | 39 + ...ProviderX509TrustStoreTrustAnchorResult.cs | 28 + .../WorkloadIdentityPoolProviderSaml.cs | 2 + .../WorkloadIdentityPoolProviderX509.cs | 31 + ...kloadIdentityPoolProviderX509TrustStore.cs | 41 + ...oolProviderX509TrustStoreIntermediateCa.cs | 28 + ...tyPoolProviderX509TrustStoreTrustAnchor.cs | 28 + .../Iam/WorkloadIdentityPoolProvider.cs | 125 + sdk/dotnet/Kms/AutokeyConfig.cs | 15 +- sdk/dotnet/Kms/GetCryptoKeyLatestVersion.cs | 208 ++ sdk/dotnet/Kms/GetCryptoKeyVersions.cs | 187 ++ ...etCryptoKeyLatestVersionPublicKeyResult.cs | 35 + .../GetCryptoKeyVersionsPublicKeyResult.cs | 35 + ...CryptoKeyVersionsVersionPublicKeyResult.cs | 35 + .../GetCryptoKeyVersionsVersionResult.cs | 60 + sdk/dotnet/Netapp/ActiveDirectory.cs | 6 +- 
sdk/dotnet/Netapp/Backup.cs | 6 +- sdk/dotnet/Netapp/BackupPolicy.cs | 6 +- sdk/dotnet/Netapp/BackupVault.cs | 6 +- sdk/dotnet/Netapp/StoragePool.cs | 4 +- sdk/dotnet/Netapp/Volume.cs | 3 + .../Inputs/SpokeLinkedVpcNetworkArgs.cs | 12 + .../Inputs/SpokeLinkedVpcNetworkGetArgs.cs | 12 + .../Outputs/SpokeLinkedVpcNetwork.cs | 7 + sdk/dotnet/NetworkConnectivity/Spoke.cs | 5 + sdk/dotnet/NetworkSecurity/ClientTlsPolicy.cs | 15 +- sdk/dotnet/NetworkSecurity/ServerTlsPolicy.cs | 20 +- sdk/dotnet/Organizations/GetProject.cs | 6 +- sdk/dotnet/Organizations/Project.cs | 58 + sdk/dotnet/ParallelStore/Instance.cs | 106 +- sdk/dotnet/Projects/IamMemberRemove.cs | 22 + sdk/dotnet/Projects/UsageExportBucket.cs | 28 + .../SubscriptionCloudStorageConfigArgs.cs | 6 + ...riptionCloudStorageConfigAvroConfigArgs.cs | 6 + ...tionCloudStorageConfigAvroConfigGetArgs.cs | 6 + .../SubscriptionCloudStorageConfigGetArgs.cs | 6 + ...ptionCloudStorageConfigAvroConfigResult.cs | 10 +- ...GetSubscriptionCloudStorageConfigResult.cs | 7 + .../Outputs/SubscriptionCloudStorageConfig.cs | 7 + ...ubscriptionCloudStorageConfigAvroConfig.cs | 10 +- sdk/dotnet/PubSub/Subscription.cs | 3 + sdk/dotnet/Redis/Cluster.cs | 72 + .../Inputs/ClusterMaintenancePolicyArgs.cs | 53 + .../Inputs/ClusterMaintenancePolicyGetArgs.cs | 53 + ...enancePolicyWeeklyMaintenanceWindowArgs.cs | 52 + ...ncePolicyWeeklyMaintenanceWindowGetArgs.cs | 52 + ...icyWeeklyMaintenanceWindowStartTimeArgs.cs | 46 + ...WeeklyMaintenanceWindowStartTimeGetArgs.cs | 46 + .../Inputs/ClusterMaintenanceScheduleArgs.cs | 48 + .../ClusterMaintenanceScheduleGetArgs.cs | 48 + .../Redis/Outputs/ClusterMaintenancePolicy.cs | 51 + ...aintenancePolicyWeeklyMaintenanceWindow.cs | 56 + ...ePolicyWeeklyMaintenanceWindowStartTime.cs | 51 + .../Outputs/ClusterMaintenanceSchedule.cs | 52 + .../V2FolderSccBigQueryExport.cs | 412 +++ .../V2OrganizationSccBigQueryExports.cs | 4 +- .../V2ProjectSccBigQueryExport.cs | 368 +++ sdk/go.mod | 1 + sdk/go.sum | 2 + sdk/go/gcp/alloydb/cluster.go | 42 + sdk/go/gcp/alloydb/pulumiTypes.go | 128 + sdk/go/gcp/assuredworkloads/workload.go | 93 +- .../gcp/backupdisasterrecovery/backupVault.go | 689 +++++ sdk/go/gcp/backupdisasterrecovery/init.go | 7 + sdk/go/gcp/bigquery/dataTransferConfig.go | 110 +- sdk/go/gcp/bigquery/pulumiTypes.go | 141 + .../gcp/bigqueryanalyticshub/dataExchange.go | 58 + sdk/go/gcp/bigqueryanalyticshub/listing.go | 90 + .../gcp/bigqueryanalyticshub/pulumiTypes.go | 569 +++- sdk/go/gcp/bigtable/pulumiTypes.go | 9 + sdk/go/gcp/bigtable/table.go | 19 + sdk/go/gcp/certificateauthority/authority.go | 18 +- sdk/go/gcp/certificatemanager/certificate.go | 11 + .../gcp/certificatemanager/getCertificates.go | 151 + sdk/go/gcp/certificatemanager/pulumiTypes.go | 646 ++++ sdk/go/gcp/cloudbuild/pulumiTypes.go | 16 +- sdk/go/gcp/cloudrun/pulumiTypes.go | 39 +- sdk/go/gcp/cloudrunv2/pulumiTypes.go | 326 +- sdk/go/gcp/cloudrunv2/service.go | 58 +- sdk/go/gcp/cloudtasks/pulumiTypes.go | 1382 +++++++++ sdk/go/gcp/cloudtasks/queue.go | 155 + sdk/go/gcp/compute/getInstance.go | 4 +- sdk/go/gcp/compute/healthCheck.go | 105 + sdk/go/gcp/compute/instance.go | 78 +- sdk/go/gcp/compute/instanceTemplate.go | 62 + sdk/go/gcp/compute/interconnect.go | 42 +- sdk/go/gcp/compute/nodeTemplate.go | 66 + sdk/go/gcp/compute/pulumiTypes.go | 395 ++- sdk/go/gcp/compute/pulumiTypes1.go | 223 ++ sdk/go/gcp/compute/targetHttpsProxy.go | 24 + sdk/go/gcp/container/attachedCluster.go | 12 +- sdk/go/gcp/container/pulumiTypes.go | 393 ++- .../connectionProfile.go | 201 ++ 
.../databasemigrationservice/pulumiTypes.go | 163 +- sdk/go/gcp/dataloss/pulumiTypes.go | 1023 +++++- sdk/go/gcp/dataproc/pulumiTypes.go | 70 +- sdk/go/gcp/datastream/pulumiTypes.go | 286 ++ sdk/go/gcp/datastream/stream.go | 150 +- sdk/go/gcp/discoveryengine/dataStore.go | 24 +- sdk/go/gcp/discoveryengine/pulumiTypes.go | 620 ++++ sdk/go/gcp/firebase/databaseInstance.go | 12 +- sdk/go/gcp/gkehub/featureMembership.go | 9 +- sdk/go/gcp/gkehub/pulumiTypes.go | 32 + .../iam/getWorkloadIdentityPoolProvider.go | 5 + sdk/go/gcp/iam/pulumiTypes.go | 1012 ++++++ .../gcp/iam/workloadIdentityPoolProvider.go | 143 + sdk/go/gcp/kms/autokeyConfig.go | 12 +- sdk/go/gcp/kms/getCryptoKeyLatestVersion.go | 191 ++ sdk/go/gcp/kms/getCryptoKeyVersions.go | 167 + sdk/go/gcp/kms/pulumiTypes.go | 479 +++ sdk/go/gcp/netapp/activeDirectory.go | 6 +- sdk/go/gcp/netapp/backup.go | 6 +- sdk/go/gcp/netapp/backupPolicy.go | 6 +- sdk/go/gcp/netapp/backupVault.go | 6 +- sdk/go/gcp/netapp/storagePool.go | 4 +- sdk/go/gcp/netapp/volume.go | 6 + sdk/go/gcp/networkconnectivity/pulumiTypes.go | 19 + sdk/go/gcp/networkconnectivity/spoke.go | 4 + sdk/go/gcp/networksecurity/clientTlsPolicy.go | 13 +- sdk/go/gcp/networksecurity/serverTlsPolicy.go | 16 +- sdk/go/gcp/organizations/getProject.go | 5 + sdk/go/gcp/organizations/project.go | 50 + sdk/go/gcp/parallelstore/instance.go | 236 +- sdk/go/gcp/projects/iamMemberRemove.go | 35 + sdk/go/gcp/projects/usageExportBucket.go | 35 + sdk/go/gcp/pubsub/pulumiTypes.go | 56 + sdk/go/gcp/pubsub/subscription.go | 5 +- sdk/go/gcp/redis/cluster.go | 56 + sdk/go/gcp/redis/pulumiTypes.go | 616 ++++ sdk/go/gcp/securitycenter/init.go | 14 + .../v2folderSccBigQueryExport.go | 616 ++++ .../v2organizationSccBigQueryExports.go | 4 +- .../v2projectSccBigQueryExport.go | 552 ++++ sdk/java/build.gradle | 2 +- .../java/com/pulumi/gcp/alloydb/Cluster.java | 35 + .../com/pulumi/gcp/alloydb/ClusterArgs.java | 45 + .../gcp/alloydb/inputs/ClusterState.java | 98 + .../inputs/ClusterTrialMetadataArgs.java | 194 ++ .../alloydb/outputs/ClusterTrialMetadata.java | 120 + .../pulumi/gcp/assuredworkloads/Workload.java | 84 +- .../gcp/assuredworkloads/WorkloadArgs.java | 53 +- .../inputs/WorkloadState.java | 53 +- .../backupdisasterrecovery/BackupVault.java | 527 ++++ .../BackupVaultArgs.java | 517 ++++ .../inputs/BackupVaultState.java | 996 ++++++ .../gcp/bigquery/DataTransferConfig.java | 108 +- .../gcp/bigquery/DataTransferConfigArgs.java | 42 + ...sferConfigEncryptionConfigurationArgs.java | 85 + .../inputs/DataTransferConfigState.java | 42 + ...TransferConfigEncryptionConfiguration.java | 58 + .../bigqueryanalyticshub/DataExchange.java | 61 + .../DataExchangeArgs.java | 46 + .../gcp/bigqueryanalyticshub/Listing.java | 99 + ...aExchangeSharingEnvironmentConfigArgs.java | 121 + ...nvironmentConfigDcrExchangeConfigArgs.java | 28 + ...onmentConfigDefaultExchangeConfigArgs.java | 28 + .../inputs/DataExchangeState.java | 46 + .../inputs/ListingBigqueryDatasetArgs.java | 64 +- ...ngBigqueryDatasetSelectedResourceArgs.java | 91 + .../ListingRestrictedExportConfigArgs.java | 41 + .../DataExchangeSharingEnvironmentConfig.java | 79 + ...ingEnvironmentConfigDcrExchangeConfig.java | 32 + ...nvironmentConfigDefaultExchangeConfig.java | 32 + .../outputs/ListingBigqueryDataset.java | 33 +- ...istingBigqueryDatasetSelectedResource.java | 61 + .../ListingRestrictedExportConfig.java | 23 + .../java/com/pulumi/gcp/bigtable/Table.java | 18 + .../inputs/TableColumnFamilyArgs.java | 39 + .../bigtable/outputs/TableColumnFamily.java | 23 
+ .../gcp/certificateauthority/Authority.java | 6 +- .../certificateauthority/AuthorityArgs.java | 12 +- .../inputs/AuthorityState.java | 12 +- .../gcp/certificatemanager/Certificate.java | 14 + .../CertificatemanagerFunctions.java | 453 +++ .../inputs/CertificateState.java | 48 + .../inputs/GetCertificatesArgs.java | 120 + .../inputs/GetCertificatesPlainArgs.java | 99 + .../outputs/GetCertificatesCertificate.java | 302 ++ .../GetCertificatesCertificateManaged.java | 196 ++ ...ficateManagedAuthorizationAttemptInfo.java | 131 + ...esCertificateManagedProvisioningIssue.java | 85 + .../outputs/GetCertificatesResult.java | 106 + .../inputs/WorkerPoolWorkerConfigArgs.java | 16 +- .../outputs/WorkerPoolWorkerConfig.java | 8 +- .../inputs/ServiceTemplateSpecVolumeArgs.java | 12 +- .../ServiceTemplateSpecVolumeCsiArgs.java | 12 +- .../outputs/GetServiceTemplateSpecVolume.java | 6 +- .../GetServiceTemplateSpecVolumeCsi.java | 6 +- .../outputs/ServiceTemplateSpecVolume.java | 6 +- .../outputs/ServiceTemplateSpecVolumeCsi.java | 6 +- .../com/pulumi/gcp/cloudrunv2/Service.java | 66 +- .../inputs/JobTemplateTemplateVolumeArgs.java | 16 +- .../inputs/ServiceTemplateArgs.java | 42 + .../ServiceTemplateServiceMeshArgs.java | 91 + .../inputs/ServiceTemplateVolumeArgs.java | 8 +- .../inputs/ServiceTemplateVolumeNfsArgs.java | 8 - .../outputs/GetJobTemplateTemplateVolume.java | 8 +- .../outputs/GetServiceTemplate.java | 27 + .../GetServiceTemplateServiceMesh.java | 58 + .../outputs/GetServiceTemplateVolume.java | 4 +- .../outputs/JobTemplateTemplateVolume.java | 8 +- .../cloudrunv2/outputs/ServiceTemplate.java | 24 + .../outputs/ServiceTemplateServiceMesh.java | 61 + .../outputs/ServiceTemplateVolume.java | 4 +- .../outputs/ServiceTemplateVolumeNfs.java | 4 - .../java/com/pulumi/gcp/cloudtasks/Queue.java | 175 ++ .../com/pulumi/gcp/cloudtasks/QueueArgs.java | 42 + .../inputs/QueueHttpTargetArgs.java | 325 ++ .../QueueHttpTargetHeaderOverrideArgs.java | 89 + ...eueHttpTargetHeaderOverrideHeaderArgs.java | 125 + .../inputs/QueueHttpTargetOauthTokenArgs.java | 136 + .../inputs/QueueHttpTargetOidcTokenArgs.java | 132 + .../QueueHttpTargetUriOverrideArgs.java | 338 ++ ...HttpTargetUriOverridePathOverrideArgs.java | 83 + ...ttpTargetUriOverrideQueryOverrideArgs.java | 83 + .../gcp/cloudtasks/inputs/QueueState.java | 42 + .../cloudtasks/outputs/QueueHttpTarget.java | 185 ++ .../QueueHttpTargetHeaderOverride.java | 60 + .../QueueHttpTargetHeaderOverrideHeader.java | 81 + .../outputs/QueueHttpTargetOauthToken.java | 87 + .../outputs/QueueHttpTargetOidcToken.java | 85 + .../outputs/QueueHttpTargetUriOverride.java | 198 ++ ...ueueHttpTargetUriOverridePathOverride.java | 57 + ...eueHttpTargetUriOverrideQueryOverride.java | 57 + .../com/pulumi/gcp/compute/HealthCheck.java | 132 + .../java/com/pulumi/gcp/compute/Instance.java | 78 +- .../pulumi/gcp/compute/InstanceTemplate.java | 66 + .../com/pulumi/gcp/compute/Interconnect.java | 14 +- .../pulumi/gcp/compute/InterconnectArgs.java | 35 +- .../com/pulumi/gcp/compute/NodeTemplate.java | 67 + .../pulumi/gcp/compute/NodeTemplateArgs.java | 59 + .../pulumi/gcp/compute/TargetHttpsProxy.java | 8 + .../gcp/compute/TargetHttpsProxyArgs.java | 16 + .../gcp/compute/inputs/FirewallAllowArgs.java | 10 +- .../gcp/compute/inputs/FirewallDenyArgs.java | 10 +- .../compute/inputs/InstanceBootDiskArgs.java | 37 + .../InstanceFromMachineImageBootDiskArgs.java | 37 + .../InstanceFromTemplateBootDiskArgs.java | 37 + .../inputs/InstanceNetworkInterfaceArgs.java | 8 +- 
.../gcp/compute/inputs/InstanceState.java | 8 +- .../gcp/compute/inputs/InterconnectState.java | 35 +- .../inputs/NodeTemplateAcceleratorArgs.java | 129 + .../gcp/compute/inputs/NodeTemplateState.java | 59 + .../compute/inputs/TargetHttpsProxyState.java | 16 + .../gcp/compute/outputs/FirewallAllow.java | 4 +- .../gcp/compute/outputs/FirewallDeny.java | 4 +- .../compute/outputs/GetInstanceBootDisk.java | 23 + .../compute/outputs/GetInstanceResult.java | 4 +- .../gcp/compute/outputs/InstanceBootDisk.java | 21 + .../InstanceFromMachineImageBootDisk.java | 21 + .../outputs/InstanceFromTemplateBootDisk.java | 21 + .../outputs/InstanceNetworkInterface.java | 4 +- .../outputs/NodeTemplateAccelerator.java | 83 + .../pulumi/gcp/container/AttachedCluster.java | 4 +- .../gcp/container/AttachedClusterArgs.java | 8 +- .../inputs/AttachedClusterState.java | 8 +- .../ClusterNodeConfigKubeletConfigArgs.java | 37 + .../inputs/ClusterNodePoolAutoConfigArgs.java | 50 +- ...dePoolAutoConfigNodeKubeletConfigArgs.java | 83 + ...odePoolDefaultsNodeConfigDefaultsArgs.java | 37 + ...erNodePoolNodeConfigKubeletConfigArgs.java | 37 + .../NodePoolNodeConfigKubeletConfigArgs.java | 37 + .../ClusterNodeConfigKubeletConfig.java | 21 + .../outputs/ClusterNodePoolAutoConfig.java | 28 +- ...erNodePoolAutoConfigNodeKubeletConfig.java | 57 + ...terNodePoolDefaultsNodeConfigDefaults.java | 21 + ...lusterNodePoolNodeConfigKubeletConfig.java | 21 + .../GetClusterNodeConfigKubeletConfig.java | 23 + .../outputs/GetClusterNodePoolAutoConfig.java | 27 + ...erNodePoolAutoConfigNodeKubeletConfig.java | 58 + ...usterNodePoolDefaultNodeConfigDefault.java | 23 + ...lusterNodePoolNodeConfigKubeletConfig.java | 23 + .../NodePoolNodeConfigKubeletConfig.java | 21 + .../ConnectionProfile.java | 223 ++ .../inputs/ConnectionProfileMysqlArgs.java | 85 +- .../ConnectionProfilePostgresqlArgs.java | 122 +- .../outputs/ConnectionProfileMysql.java | 73 +- .../outputs/ConnectionProfilePostgresql.java | 94 +- .../PreventionDiscoveryConfigActionArgs.java | 42 + ...DiscoveryConfigActionTagResourcesArgs.java | 190 ++ ...figActionTagResourcesTagConditionArgs.java | 129 + ...urcesTagConditionSensitivityScoreArgs.java | 89 + ...ActionTagResourcesTagConditionTagArgs.java | 83 + ...ConfigTargetBigQueryTargetCadenceArgs.java | 42 + ...nceInspectTemplateModifiedCadenceArgs.java | 87 + ...etCloudSqlTargetGenerationCadenceArgs.java | 42 + ...nceInspectTemplateModifiedCadenceArgs.java | 89 + .../PreventionDiscoveryConfigAction.java | 24 + ...tionDiscoveryConfigActionTagResources.java | 112 + ...yConfigActionTagResourcesTagCondition.java | 83 + ...ResourcesTagConditionSensitivityScore.java | 60 + ...nfigActionTagResourcesTagConditionTag.java | 57 + ...veryConfigTargetBigQueryTargetCadence.java | 24 + ...CadenceInspectTemplateModifiedCadence.java | 59 + ...TargetCloudSqlTargetGenerationCadence.java | 24 + ...CadenceInspectTemplateModifiedCadence.java | 60 + ...latePlacementManagedClusterConfigArgs.java | 8 +- ...agedClusterConfigGceClusterConfigArgs.java | 18 +- ...ClusterConfigInitializationActionArgs.java | 8 +- ...nagedClusterConfigLifecycleConfigArgs.java | 32 +- ...tManagedClusterConfigMasterConfigArgs.java | 8 +- ...TemplatePlacementManagedClusterConfig.java | 4 +- ...tManagedClusterConfigGceClusterConfig.java | 8 +- ...agedClusterConfigInitializationAction.java | 4 +- ...ntManagedClusterConfigLifecycleConfig.java | 16 +- ...ementManagedClusterConfigMasterConfig.java | 4 +- .../com/pulumi/gcp/datastream/Stream.java | 154 +- 
.../com/pulumi/gcp/datastream/StreamArgs.java | 12 +- ...SourceConfigSqlServerSourceConfigArgs.java | 76 + ...SqlServerSourceConfigChangeTablesArgs.java | 28 + ...ServerSourceConfigTransactionLogsArgs.java | 28 + .../gcp/datastream/inputs/StreamState.java | 12 +- ...reamSourceConfigSqlServerSourceConfig.java | 44 + ...nfigSqlServerSourceConfigChangeTables.java | 32 + ...gSqlServerSourceConfigTransactionLogs.java | 32 + .../pulumi/gcp/discoveryengine/DataStore.java | 8 +- .../gcp/discoveryengine/DataStoreArgs.java | 18 +- ...DataStoreDocumentProcessingConfigArgs.java | 42 + ...entProcessingConfigChunkingConfigArgs.java | 87 + ...ngConfigLayoutBasedChunkingConfigArgs.java | 129 + ...cessingConfigDefaultParsingConfigArgs.java | 38 + ...tParsingConfigLayoutParsingConfigArgs.java | 28 + ...essingConfigParsingConfigOverrideArgs.java | 38 + ...ConfigOverrideLayoutParsingConfigArgs.java | 28 + .../inputs/DataStoreState.java | 18 +- .../DataStoreDocumentProcessingConfig.java | 24 + ...ocumentProcessingConfigChunkingConfig.java | 59 + ...unkingConfigLayoutBasedChunkingConfig.java | 83 + ...tProcessingConfigDefaultParsingConfig.java | 22 + ...faultParsingConfigLayoutParsingConfig.java | 32 + ...ProcessingConfigParsingConfigOverride.java | 22 + ...singConfigOverrideLayoutParsingConfig.java | 32 + .../pulumi/gcp/firebase/DatabaseInstance.java | 4 +- .../gcp/firebase/DatabaseInstanceArgs.java | 8 +- .../inputs/DatabaseInstanceState.java | 8 +- .../pulumi/gcp/gkehub/FeatureMembership.java | 9 +- ...FeatureMembershipConfigmanagementArgs.java | 38 +- .../FeatureMembershipConfigmanagement.java | 20 +- .../gcp/iam/WorkloadIdentityPoolProvider.java | 133 + .../iam/WorkloadIdentityPoolProviderArgs.java | 46 + .../WorkloadIdentityPoolProviderSamlArgs.java | 8 + .../WorkloadIdentityPoolProviderState.java | 46 + .../WorkloadIdentityPoolProviderX509Args.java | 101 + ...dentityPoolProviderX509TrustStoreArgs.java | 176 ++ ...viderX509TrustStoreIntermediateCaArgs.java | 87 + ...ProviderX509TrustStoreTrustAnchorArgs.java | 87 + ...GetWorkloadIdentityPoolProviderResult.java | 19 + .../GetWorkloadIdentityPoolProviderX509.java | 70 + ...oadIdentityPoolProviderX509TrustStore.java | 97 + ...lProviderX509TrustStoreIntermediateCa.java | 60 + ...PoolProviderX509TrustStoreTrustAnchor.java | 60 + .../WorkloadIdentityPoolProviderSaml.java | 4 + .../WorkloadIdentityPoolProviderX509.java | 66 + ...oadIdentityPoolProviderX509TrustStore.java | 100 + ...lProviderX509TrustStoreIntermediateCa.java | 59 + ...PoolProviderX509TrustStoreTrustAnchor.java | 59 + .../com/pulumi/gcp/kms/AutokeyConfig.java | 10 +- .../java/com/pulumi/gcp/kms/KmsFunctions.java | 462 +++ .../inputs/GetCryptoKeyLatestVersionArgs.java | 152 + .../GetCryptoKeyLatestVersionPlainArgs.java | 124 + .../kms/inputs/GetCryptoKeyVersionsArgs.java | 156 + .../inputs/GetCryptoKeyVersionsPlainArgs.java | 127 + .../GetCryptoKeyLatestVersionPublicKey.java | 81 + .../GetCryptoKeyLatestVersionResult.java | 216 ++ .../GetCryptoKeyVersionsPublicKey.java | 81 + .../outputs/GetCryptoKeyVersionsResult.java | 135 + .../outputs/GetCryptoKeyVersionsVersion.java | 179 ++ .../GetCryptoKeyVersionsVersionPublicKey.java | 81 + .../pulumi/gcp/netapp/ActiveDirectory.java | 6 +- .../java/com/pulumi/gcp/netapp/Backup.java | 6 +- .../com/pulumi/gcp/netapp/BackupPolicy.java | 6 +- .../com/pulumi/gcp/netapp/BackupVault.java | 6 +- .../com/pulumi/gcp/netapp/StoragePool.java | 4 +- .../java/com/pulumi/gcp/netapp/Volume.java | 2 + .../com/pulumi/gcp/netapp/VolumeArgs.java | 4 + 
.../pulumi/gcp/netapp/inputs/VolumeState.java | 4 + .../pulumi/gcp/networkconnectivity/Spoke.java | 3 + .../inputs/SpokeLinkedVpcNetworkArgs.java | 47 + .../outputs/SpokeLinkedVpcNetwork.java | 24 + .../gcp/networksecurity/ClientTlsPolicy.java | 22 +- .../gcp/networksecurity/ServerTlsPolicy.java | 25 +- .../com/pulumi/gcp/organizations/Project.java | 56 + .../pulumi/gcp/organizations/ProjectArgs.java | 37 + .../organizations/inputs/ProjectState.java | 37 + .../outputs/GetProjectResult.java | 15 + .../pulumi/gcp/parallelstore/Instance.java | 84 +- .../gcp/parallelstore/InstanceArgs.java | 136 +- .../parallelstore/inputs/InstanceState.java | 168 +- .../pulumi/gcp/projects/IamMemberRemove.java | 41 + .../gcp/projects/UsageExportBucket.java | 42 + .../com/pulumi/gcp/pubsub/Subscription.java | 3 + .../SubscriptionCloudStorageConfigArgs.java | 37 + ...ptionCloudStorageConfigAvroConfigArgs.java | 37 + .../GetSubscriptionCloudStorageConfig.java | 23 + ...scriptionCloudStorageConfigAvroConfig.java | 23 + .../SubscriptionCloudStorageConfig.java | 21 + ...scriptionCloudStorageConfigAvroConfig.java | 21 + .../java/com/pulumi/gcp/redis/Cluster.java | 56 + .../com/pulumi/gcp/redis/ClusterArgs.java | 38 + .../inputs/ClusterMaintenancePolicyArgs.java | 208 ++ ...ancePolicyWeeklyMaintenanceWindowArgs.java | 221 ++ ...yWeeklyMaintenanceWindowStartTimeArgs.java | 202 ++ .../ClusterMaintenanceScheduleArgs.java | 197 ++ .../pulumi/gcp/redis/inputs/ClusterState.java | 91 + .../outputs/ClusterMaintenancePolicy.java | 122 + ...ntenancePolicyWeeklyMaintenanceWindow.java | 133 + ...olicyWeeklyMaintenanceWindowStartTime.java | 124 + .../outputs/ClusterMaintenanceSchedule.java | 119 + .../V2FolderSccBigQueryExport.java | 388 +++ .../V2FolderSccBigQueryExportArgs.java | 375 +++ .../V2OrganizationSccBigQueryExports.java | 4 +- .../V2ProjectSccBigQueryExport.java | 335 ++ .../V2ProjectSccBigQueryExportArgs.java | 364 +++ .../V2FolderSccBigQueryExportState.java | 581 ++++ .../V2ProjectSccBigQueryExportState.java | 573 ++++ sdk/nodejs/alloydb/cluster.ts | 32 + sdk/nodejs/assuredworkloads/workload.ts | 65 +- .../backupdisasterrecovery/backupVault.ts | 464 +++ sdk/nodejs/backupdisasterrecovery/index.ts | 8 + sdk/nodejs/bigquery/dataTransferConfig.ts | 67 +- .../bigqueryanalyticshub/dataExchange.ts | 38 + sdk/nodejs/bigqueryanalyticshub/listing.ts | 61 + sdk/nodejs/bigtable/table.ts | 17 + sdk/nodejs/certificateauthority/authority.ts | 9 +- sdk/nodejs/certificatemanager/certificate.ts | 10 + .../certificatemanager/getCertificates.ts | 107 + sdk/nodejs/certificatemanager/index.ts | 5 + sdk/nodejs/cloudrunv2/service.ts | 30 +- sdk/nodejs/cloudtasks/queue.ts | 113 + sdk/nodejs/compute/getInstance.ts | 2 +- sdk/nodejs/compute/healthCheck.ts | 60 + sdk/nodejs/compute/instance.ts | 47 +- sdk/nodejs/compute/instanceTemplate.ts | 35 + sdk/nodejs/compute/interconnect.ts | 21 +- sdk/nodejs/compute/nodeTemplate.ts | 39 + sdk/nodejs/compute/targetHttpsProxy.ts | 12 + sdk/nodejs/container/attachedCluster.ts | 6 +- .../connectionProfile.ts | 116 + sdk/nodejs/datastream/stream.ts | 106 +- sdk/nodejs/discoveryengine/dataStore.ts | 12 +- sdk/nodejs/firebase/databaseInstance.ts | 6 +- sdk/nodejs/gkehub/featureMembership.ts | 9 +- .../iam/getWorkloadIdentityPoolProvider.ts | 1 + .../iam/workloadIdentityPoolProvider.ts | 78 + sdk/nodejs/kms/autokeyConfig.ts | 7 +- sdk/nodejs/kms/getCryptoKeyLatestVersion.ts | 141 + sdk/nodejs/kms/getCryptoKeyVersions.ts | 130 + sdk/nodejs/kms/index.ts | 10 + sdk/nodejs/netapp/activeDirectory.ts | 6 +- 
sdk/nodejs/netapp/backup.ts | 6 +- sdk/nodejs/netapp/backupPolicy.ts | 6 +- sdk/nodejs/netapp/backupVault.ts | 6 +- sdk/nodejs/netapp/storagePool.ts | 4 +- sdk/nodejs/netapp/volume.ts | 3 + sdk/nodejs/networkconnectivity/spoke.ts | 4 + sdk/nodejs/networksecurity/clientTlsPolicy.ts | 23 +- sdk/nodejs/networksecurity/serverTlsPolicy.ts | 26 +- sdk/nodejs/organizations/getProject.ts | 1 + sdk/nodejs/organizations/project.ts | 34 + sdk/nodejs/parallelstore/instance.ts | 106 +- sdk/nodejs/projects/iamMemberRemove.ts | 14 + sdk/nodejs/projects/usageExportBucket.ts | 20 + sdk/nodejs/pubsub/subscription.ts | 3 + sdk/nodejs/redis/cluster.ts | 48 + sdk/nodejs/securitycenter/index.ts | 16 + .../v2folderSccBigQueryExport.ts | 365 +++ .../v2organizationSccBigQueryExports.ts | 4 +- .../v2projectSccBigQueryExport.ts | 330 ++ sdk/nodejs/tsconfig.json | 6 + sdk/nodejs/types/input.ts | 653 +++- sdk/nodejs/types/output.ts | 927 +++++- sdk/python/pulumi_gcp/__init__.py | 24 + sdk/python/pulumi_gcp/alloydb/_inputs.py | 94 + sdk/python/pulumi_gcp/alloydb/cluster.py | 95 +- sdk/python/pulumi_gcp/alloydb/outputs.py | 79 + .../pulumi_gcp/assuredworkloads/workload.py | 151 +- .../backupdisasterrecovery/__init__.py | 1 + .../backupdisasterrecovery/backup_vault.py | 1203 ++++++++ sdk/python/pulumi_gcp/bigquery/_inputs.py | 33 + .../bigquery/data_transfer_config.py | 140 +- sdk/python/pulumi_gcp/bigquery/outputs.py | 36 + .../bigqueryanalyticshub/_inputs.py | 176 +- .../bigqueryanalyticshub/data_exchange.py | 99 +- .../bigqueryanalyticshub/listing.py | 114 + .../bigqueryanalyticshub/outputs.py | 143 +- sdk/python/pulumi_gcp/bigtable/_inputs.py | 22 +- sdk/python/pulumi_gcp/bigtable/outputs.py | 14 +- sdk/python/pulumi_gcp/bigtable/table.py | 34 + .../certificateauthority/authority.py | 21 +- .../pulumi_gcp/certificatemanager/__init__.py | 1 + .../certificatemanager/certificate.py | 28 + .../certificatemanager/get_certificates.py | 150 + .../pulumi_gcp/certificatemanager/outputs.py | 322 ++ sdk/python/pulumi_gcp/cloudbuild/_inputs.py | 12 +- sdk/python/pulumi_gcp/cloudbuild/outputs.py | 8 +- sdk/python/pulumi_gcp/cloudrun/_inputs.py | 18 +- sdk/python/pulumi_gcp/cloudrun/outputs.py | 24 +- sdk/python/pulumi_gcp/cloudrunv2/_inputs.py | 87 +- sdk/python/pulumi_gcp/cloudrunv2/outputs.py | 98 +- sdk/python/pulumi_gcp/cloudrunv2/service.py | 54 +- sdk/python/pulumi_gcp/cloudtasks/_inputs.py | 630 ++++ sdk/python/pulumi_gcp/cloudtasks/outputs.py | 479 +++ sdk/python/pulumi_gcp/cloudtasks/queue.py | 238 ++ sdk/python/pulumi_gcp/compute/_inputs.py | 138 +- sdk/python/pulumi_gcp/compute/get_instance.py | 2 +- sdk/python/pulumi_gcp/compute/health_check.py | 114 + sdk/python/pulumi_gcp/compute/instance.py | 90 +- .../pulumi_gcp/compute/instance_template.py | 66 + sdk/python/pulumi_gcp/compute/interconnect.py | 49 +- .../pulumi_gcp/compute/node_template.py | 93 + sdk/python/pulumi_gcp/compute/outputs.py | 114 +- .../pulumi_gcp/compute/target_https_proxy.py | 28 + sdk/python/pulumi_gcp/container/_inputs.py | 143 +- .../pulumi_gcp/container/attached_cluster.py | 14 +- sdk/python/pulumi_gcp/container/outputs.py | 176 +- .../databasemigrationservice/_inputs.py | 324 +- .../connection_profile.py | 206 ++ .../databasemigrationservice/outputs.py | 196 +- sdk/python/pulumi_gcp/dataloss/_inputs.py | 354 ++- sdk/python/pulumi_gcp/dataloss/outputs.py | 277 +- sdk/python/pulumi_gcp/dataproc/_inputs.py | 54 +- sdk/python/pulumi_gcp/dataproc/outputs.py | 36 +- sdk/python/pulumi_gcp/datastream/_inputs.py | 70 +- 
sdk/python/pulumi_gcp/datastream/outputs.py | 46 +- sdk/python/pulumi_gcp/datastream/stream.py | 201 +- .../pulumi_gcp/discoveryengine/_inputs.py | 188 ++ .../pulumi_gcp/discoveryengine/data_store.py | 28 +- .../pulumi_gcp/discoveryengine/outputs.py | 154 +- .../pulumi_gcp/firebase/database_instance.py | 14 +- sdk/python/pulumi_gcp/gkehub/_inputs.py | 26 +- .../pulumi_gcp/gkehub/feature_membership.py | 18 +- sdk/python/pulumi_gcp/gkehub/outputs.py | 18 +- sdk/python/pulumi_gcp/iam/_inputs.py | 196 ++ .../get_workload_identity_pool_provider.py | 16 +- sdk/python/pulumi_gcp/iam/outputs.py | 295 ++ .../iam/workload_identity_pool_provider.py | 167 +- sdk/python/pulumi_gcp/kms/__init__.py | 2 + sdk/python/pulumi_gcp/kms/autokey_config.py | 12 +- .../kms/get_crypto_key_latest_version.py | 222 ++ .../pulumi_gcp/kms/get_crypto_key_versions.py | 175 ++ sdk/python/pulumi_gcp/kms/outputs.py | 164 + .../pulumi_gcp/netapp/active_directory.py | 12 +- sdk/python/pulumi_gcp/netapp/backup.py | 12 +- sdk/python/pulumi_gcp/netapp/backup_policy.py | 12 +- sdk/python/pulumi_gcp/netapp/backup_vault.py | 12 +- sdk/python/pulumi_gcp/netapp/storage_pool.py | 8 +- sdk/python/pulumi_gcp/netapp/volume.py | 7 + .../pulumi_gcp/networkconnectivity/_inputs.py | 22 +- .../pulumi_gcp/networkconnectivity/outputs.py | 16 +- .../pulumi_gcp/networkconnectivity/spoke.py | 8 + .../networksecurity/client_tls_policy.py | 46 +- .../networksecurity/server_tls_policy.py | 52 +- .../pulumi_gcp/organizations/get_project.py | 16 +- .../pulumi_gcp/organizations/project.py | 91 +- .../pulumi_gcp/parallelstore/instance.py | 242 +- .../pulumi_gcp/projects/iam_member_remove.py | 26 + .../projects/usage_export_bucket.py | 38 + sdk/python/pulumi_gcp/pubsub/_inputs.py | 40 + sdk/python/pulumi_gcp/pubsub/outputs.py | 52 +- sdk/python/pulumi_gcp/pubsub/subscription.py | 6 + sdk/python/pulumi_gcp/redis/_inputs.py | 419 +++ sdk/python/pulumi_gcp/redis/cluster.py | 123 + sdk/python/pulumi_gcp/redis/outputs.py | 315 ++ .../pulumi_gcp/securitycenter/__init__.py | 2 + .../v2_folder_scc_big_query_export.py | 857 +++++ .../v2_organization_scc_big_query_exports.py | 8 +- .../v2_project_scc_big_query_export.py | 796 +++++ upstream | 2 +- 828 files changed, 60711 insertions(+), 3095 deletions(-) create mode 100644 sdk/dotnet/Alloydb/Inputs/ClusterTrialMetadataArgs.cs create mode 100644 sdk/dotnet/Alloydb/Inputs/ClusterTrialMetadataGetArgs.cs create mode 100644 sdk/dotnet/Alloydb/Outputs/ClusterTrialMetadata.cs create mode 100644 sdk/dotnet/BackupDisasterRecovery/BackupVault.cs create mode 100644 sdk/dotnet/BigQuery/Inputs/DataTransferConfigEncryptionConfigurationArgs.cs create mode 100644 sdk/dotnet/BigQuery/Inputs/DataTransferConfigEncryptionConfigurationGetArgs.cs create mode 100644 sdk/dotnet/BigQuery/Outputs/DataTransferConfigEncryptionConfiguration.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigArgs.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigGetArgs.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigGetArgs.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigGetArgs.cs create mode 
100644 sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetSelectedResourceArgs.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetSelectedResourceGetArgs.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfig.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfig.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig.cs create mode 100644 sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingBigqueryDatasetSelectedResource.cs create mode 100644 sdk/dotnet/CertificateManager/GetCertificates.cs create mode 100644 sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedAuthorizationAttemptInfoResult.cs create mode 100644 sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedProvisioningIssueResult.cs create mode 100644 sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedResult.cs create mode 100644 sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateResult.cs create mode 100644 sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateServiceMeshArgs.cs create mode 100644 sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateServiceMeshGetArgs.cs create mode 100644 sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateServiceMeshResult.cs create mode 100644 sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateServiceMesh.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetGetArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideGetArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideHeaderArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideHeaderGetArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOauthTokenArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOauthTokenGetArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOidcTokenArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOidcTokenGetArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideGetArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverridePathOverrideArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverridePathOverrideGetArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideQueryOverrideArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideQueryOverrideGetArgs.cs create mode 100644 sdk/dotnet/CloudTasks/Outputs/QueueHttpTarget.cs create mode 100644 sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetHeaderOverride.cs create mode 100644 sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetHeaderOverrideHeader.cs create mode 100644 sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetOauthToken.cs create mode 100644 sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetOidcToken.cs create mode 100644 sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverride.cs create mode 100644 sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverridePathOverride.cs create mode 100644 sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverrideQueryOverride.cs create mode 100644 
sdk/dotnet/Compute/Inputs/NodeTemplateAcceleratorArgs.cs create mode 100644 sdk/dotnet/Compute/Inputs/NodeTemplateAcceleratorGetArgs.cs create mode 100644 sdk/dotnet/Compute/Outputs/NodeTemplateAccelerator.cs create mode 100644 sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigNodeKubeletConfigArgs.cs create mode 100644 sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigNodeKubeletConfigGetArgs.cs create mode 100644 sdk/dotnet/Container/Outputs/ClusterNodePoolAutoConfigNodeKubeletConfig.cs create mode 100644 sdk/dotnet/Container/Outputs/GetClusterNodePoolAutoConfigNodeKubeletConfigResult.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesGetArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionGetArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreGetArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagGetArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceGetArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs.cs create mode 100644 sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceGetArgs.cs create mode 100644 sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResources.cs create mode 100644 sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagCondition.cs create mode 100644 sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore.cs create mode 100644 sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTag.cs create mode 100644 sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence.cs create mode 100644 sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence.cs create mode 100644 sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.cs create mode 100644 sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesGetArgs.cs create mode 100644 sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs.cs create mode 100644 sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsGetArgs.cs create mode 100644 sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfigChangeTables.cs create mode 100644 sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfigTransactionLogs.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigArgs.cs create mode 
100644 sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigGetArgs.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigGetArgs.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigGetArgs.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigGetArgs.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigChunkingConfig.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig.cs create mode 100644 sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig.cs create mode 100644 sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509Args.cs create mode 100644 sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509GetArgs.cs create mode 100644 sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreArgs.cs create mode 100644 sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreGetArgs.cs create mode 100644 sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.cs create mode 100644 sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaGetArgs.cs create mode 100644 sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.cs create mode 100644 sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorGetArgs.cs create mode 100644 sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509Result.cs create mode 100644 sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult.cs create mode 100644 sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreResult.cs create mode 100644 sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult.cs create mode 100644 sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509.cs create mode 100644 sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStore.cs create mode 100644 sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.cs create mode 100644 sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.cs create mode 100644 sdk/dotnet/Kms/GetCryptoKeyLatestVersion.cs create mode 100644 sdk/dotnet/Kms/GetCryptoKeyVersions.cs create mode 100644 sdk/dotnet/Kms/Outputs/GetCryptoKeyLatestVersionPublicKeyResult.cs create mode 100644 sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsPublicKeyResult.cs create mode 100644 sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsVersionPublicKeyResult.cs create mode 100644 sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsVersionResult.cs create mode 100644 sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyArgs.cs create mode 100644 sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyGetArgs.cs 
create mode 100644 sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.cs create mode 100644 sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowGetArgs.cs create mode 100644 sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.cs create mode 100644 sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeGetArgs.cs create mode 100644 sdk/dotnet/Redis/Inputs/ClusterMaintenanceScheduleArgs.cs create mode 100644 sdk/dotnet/Redis/Inputs/ClusterMaintenanceScheduleGetArgs.cs create mode 100644 sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicy.cs create mode 100644 sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindow.cs create mode 100644 sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime.cs create mode 100644 sdk/dotnet/Redis/Outputs/ClusterMaintenanceSchedule.cs create mode 100644 sdk/dotnet/SecurityCenter/V2FolderSccBigQueryExport.cs create mode 100644 sdk/dotnet/SecurityCenter/V2ProjectSccBigQueryExport.cs create mode 100644 sdk/go/gcp/backupdisasterrecovery/backupVault.go create mode 100644 sdk/go/gcp/certificatemanager/getCertificates.go create mode 100644 sdk/go/gcp/kms/getCryptoKeyLatestVersion.go create mode 100644 sdk/go/gcp/kms/getCryptoKeyVersions.go create mode 100644 sdk/go/gcp/securitycenter/v2folderSccBigQueryExport.go create mode 100644 sdk/go/gcp/securitycenter/v2projectSccBigQueryExport.go create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/alloydb/inputs/ClusterTrialMetadataArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/alloydb/outputs/ClusterTrialMetadata.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/BackupVault.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/BackupVaultArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/inputs/BackupVaultState.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigquery/inputs/DataTransferConfigEncryptionConfigurationArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigquery/outputs/DataTransferConfigEncryptionConfiguration.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingBigqueryDatasetSelectedResourceArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfig.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfig.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingBigqueryDatasetSelectedResource.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/GetCertificatesArgs.java create mode 100644 
sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/GetCertificatesPlainArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificate.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManaged.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManagedAuthorizationAttemptInfo.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManagedProvisioningIssue.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesResult.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateServiceMeshArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplateServiceMesh.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateServiceMesh.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetHeaderOverrideArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetHeaderOverrideHeaderArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetOauthTokenArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetOidcTokenArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverrideArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverridePathOverrideArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverrideQueryOverrideArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTarget.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetHeaderOverride.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetHeaderOverrideHeader.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetOauthToken.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetOidcToken.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverride.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverridePathOverride.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverrideQueryOverride.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/NodeTemplateAcceleratorArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/NodeTemplateAccelerator.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolAutoConfigNodeKubeletConfigArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolAutoConfigNodeKubeletConfig.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolAutoConfigNodeKubeletConfig.java create mode 100644 
sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResources.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagCondition.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTag.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfigChangeTables.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfigTransactionLogs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigChunkingConfigArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigChunkingConfig.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig.java create mode 
100644 sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509Args.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStore.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStore.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyLatestVersionArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyLatestVersionPlainArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyVersionsArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyVersionsPlainArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyLatestVersionPublicKey.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyLatestVersionResult.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsPublicKey.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsResult.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsVersion.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsVersionPublicKey.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenanceScheduleArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicy.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindow.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenanceSchedule.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2FolderSccBigQueryExport.java create mode 
100644 sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2FolderSccBigQueryExportArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2ProjectSccBigQueryExport.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2ProjectSccBigQueryExportArgs.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/securitycenter/inputs/V2FolderSccBigQueryExportState.java create mode 100644 sdk/java/src/main/java/com/pulumi/gcp/securitycenter/inputs/V2ProjectSccBigQueryExportState.java create mode 100644 sdk/nodejs/backupdisasterrecovery/backupVault.ts create mode 100644 sdk/nodejs/certificatemanager/getCertificates.ts create mode 100644 sdk/nodejs/kms/getCryptoKeyLatestVersion.ts create mode 100644 sdk/nodejs/kms/getCryptoKeyVersions.ts create mode 100644 sdk/nodejs/securitycenter/v2folderSccBigQueryExport.ts create mode 100644 sdk/nodejs/securitycenter/v2projectSccBigQueryExport.ts create mode 100644 sdk/python/pulumi_gcp/backupdisasterrecovery/backup_vault.py create mode 100644 sdk/python/pulumi_gcp/certificatemanager/get_certificates.py create mode 100644 sdk/python/pulumi_gcp/kms/get_crypto_key_latest_version.py create mode 100644 sdk/python/pulumi_gcp/kms/get_crypto_key_versions.py create mode 100644 sdk/python/pulumi_gcp/securitycenter/v2_folder_scc_big_query_export.py create mode 100644 sdk/python/pulumi_gcp/securitycenter/v2_project_scc_big_query_export.py diff --git a/.pulumi-java-gen.version b/.pulumi-java-gen.version index 7092c7c46f..92e0c7438b 100644 --- a/.pulumi-java-gen.version +++ b/.pulumi-java-gen.version @@ -1 +1 @@ -0.15.0 \ No newline at end of file +0.16.1 \ No newline at end of file diff --git a/patches/0003-rebase-bigquery_dataset.patch b/patches/0003-rebase-bigquery_dataset.patch index c720a098a2..7c26c2aa92 100644 --- a/patches/0003-rebase-bigquery_dataset.patch +++ b/patches/0003-rebase-bigquery_dataset.patch @@ -5,7 +5,7 @@ Subject: [PATCH] rebase bigquery_dataset diff --git a/google-beta/services/bigquery/resource_bigquery_dataset.go b/google-beta/services/bigquery/resource_bigquery_dataset.go -index 762492879..620b42fff 100644 +index 4d7ec2e26..dd8497c44 100644 --- a/google-beta/services/bigquery/resource_bigquery_dataset.go +++ b/google-beta/services/bigquery/resource_bigquery_dataset.go @@ -729,8 +729,13 @@ func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error diff --git a/patches/0004-website-docs-d-tweaks.patch b/patches/0004-website-docs-d-tweaks.patch index bc04dc17f2..0918bf7e95 100644 --- a/patches/0004-website-docs-d-tweaks.patch +++ b/patches/0004-website-docs-d-tweaks.patch @@ -47,7 +47,7 @@ index 50f04270d..87cb31d7c 100644 -See [google_compute_global_forwarding_rule](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_global_forwarding_rule) resource for details of the available attributes. +See google_compute_global_forwarding_rule resource for details of the available attributes. diff --git a/website/docs/d/compute_instance.html.markdown b/website/docs/d/compute_instance.html.markdown -index 2739af406..dd2ff2598 100644 +index c52917b0f..62383a602 100644 --- a/website/docs/d/compute_instance.html.markdown +++ b/website/docs/d/compute_instance.html.markdown @@ -11,7 +11,6 @@ Get information about a VM instance resource within GCE. 
For more information se diff --git a/patches/0005-docs-patching.patch b/patches/0005-docs-patching.patch index f4de7e625d..de3714ca0d 100644 --- a/patches/0005-docs-patching.patch +++ b/patches/0005-docs-patching.patch @@ -280,7 +280,7 @@ index 17a16b9f2..34943f39b 100644 ## Attributes Reference diff --git a/website/docs/r/bigtable_table.html.markdown b/website/docs/r/bigtable_table.html.markdown -index 5d6c24289..2f0aaa5b0 100644 +index 33185542a..1d8219fe6 100644 --- a/website/docs/r/bigtable_table.html.markdown +++ b/website/docs/r/bigtable_table.html.markdown @@ -10,12 +10,6 @@ Creates a Google Cloud Bigtable table inside an instance. For more information s @@ -296,7 +296,7 @@ index 5d6c24289..2f0aaa5b0 100644 ## Example Usage ```hcl -@@ -69,7 +63,7 @@ The following arguments are supported: +@@ -88,7 +82,7 @@ The following arguments are supported: * `instance_name` - (Required) The name of the Bigtable instance. * `split_keys` - (Optional) A list of predefined keys to split the table on. @@ -319,7 +319,7 @@ index 78a591547..bbb602499 100644 ## Attributes Reference diff --git a/website/docs/r/certificate_manager_certificate.html.markdown b/website/docs/r/certificate_manager_certificate.html.markdown -index 81b3bf64f..21f87225b 100644 +index 48be459a6..4b2a474e7 100644 --- a/website/docs/r/certificate_manager_certificate.html.markdown +++ b/website/docs/r/certificate_manager_certificate.html.markdown @@ -156,19 +156,38 @@ resource "google_privateca_certificate_authority" "ca_authority" { @@ -388,7 +388,7 @@ index a68f58813..650247567 100644 **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. Please refer to the field `effective_annotations` for all of the annotations present on the resource. diff --git a/website/docs/r/cloud_run_service.html.markdown b/website/docs/r/cloud_run_service.html.markdown -index 24fc2761d..5e9d1f68d 100644 +index db315a058..05482b44b 100644 --- a/website/docs/r/cloud_run_service.html.markdown +++ b/website/docs/r/cloud_run_service.html.markdown @@ -31,8 +31,63 @@ To get more information about Service, see: @@ -484,7 +484,7 @@ index 24fc2761d..5e9d1f68d 100644 Structure is [documented below](#nested_liveness_probe). -@@ -998,7 +1054,7 @@ this field is set to false, the revision name will still autogenerate.) +@@ -996,7 +1052,7 @@ this field is set to false, the revision name will still autogenerate.) may be set by external tools to store and retrieve arbitrary metadata. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations **Note**: The Cloud Run API may add additional annotations that were not provided in your config. @@ -507,7 +507,7 @@ index c3e738686..4ab0fb25d 100644 * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
diff --git a/website/docs/r/composer_environment.html.markdown b/website/docs/r/composer_environment.html.markdown -index bc7b99b97..2e32cd9b9 100644 +index 254316ed9..739d098c9 100644 --- a/website/docs/r/composer_environment.html.markdown +++ b/website/docs/r/composer_environment.html.markdown @@ -31,24 +31,21 @@ To get more information about Environments, see: @@ -682,10 +682,10 @@ index f9cad9040..243ba6fd0 100644 ## Attributes Reference diff --git a/website/docs/r/compute_instance.html.markdown b/website/docs/r/compute_instance.html.markdown -index fa523ea69..54049658d 100644 +index 20bd5f026..4337c5e3b 100644 --- a/website/docs/r/compute_instance.html.markdown +++ b/website/docs/r/compute_instance.html.markdown -@@ -87,7 +87,7 @@ The following arguments are supported: +@@ -138,7 +138,7 @@ The following arguments are supported: - - - @@ -694,7 +694,7 @@ index fa523ea69..54049658d 100644 If you try to update a property that requires stopping the instance without setting this field, the update will fail. * `attached_disk` - (Optional) Additional disks to attach to the instance. Can be repeated multiple times for multiple disks. Structure is [documented below](#nested_attached_disk). -@@ -102,7 +102,7 @@ The following arguments are supported: +@@ -153,7 +153,7 @@ The following arguments are supported: `"RUNNING"` or `"TERMINATED"`. * `deletion_protection` - (Optional) Enable deletion protection on this instance. Defaults to false. @@ -703,7 +703,7 @@ index fa523ea69..54049658d 100644 * `hostname` - (Optional) A custom hostname for the instance. Must be a fully qualified DNS name and RFC-1035-valid. Valid format is a series of labels 1-63 characters long matching the regular expression `[a-z]([-a-z0-9]*[a-z0-9])`, concatenated with periods. -@@ -110,11 +110,6 @@ The following arguments are supported: +@@ -161,11 +161,6 @@ The following arguments are supported: * `guest_accelerator` - (Optional) List of the type and count of accelerator cards attached to the instance. Structure [documented below](#nested_guest_accelerator). **Note:** GPU accelerators can only be used with [`on_host_maintenance`](#on_host_maintenance) option set to TERMINATE. @@ -715,7 +715,7 @@ index fa523ea69..54049658d 100644 * `labels` - (Optional) A map of key/value label pairs to assign to the instance. **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. -@@ -149,8 +144,7 @@ only distinction is that this separate attribute will cause a recreate on +@@ -200,8 +195,7 @@ only distinction is that this separate attribute will cause a recreate on modification. On import, `metadata_startup_script` will not be set - if you choose to specify it you will see a diff immediately after import causing a destroy/recreate operation. 
If importing an instance and specifying this value @@ -811,7 +811,7 @@ index 5d846e846..8dda2ecb4 100644 instances = [ google_compute_instance.test.id, diff --git a/website/docs/r/compute_instance_group_manager.html.markdown b/website/docs/r/compute_instance_group_manager.html.markdown -index 49cb8ccb6..992998e68 100644 +index 4492397f2..26ec08096 100644 --- a/website/docs/r/compute_instance_group_manager.html.markdown +++ b/website/docs/r/compute_instance_group_manager.html.markdown @@ -165,7 +165,7 @@ The following arguments are supported: @@ -839,7 +839,7 @@ index fe6888f17..64fce6636 100644 ## Attributes Reference diff --git a/website/docs/r/compute_instance_template.html.markdown b/website/docs/r/compute_instance_template.html.markdown -index cd97bf440..6b71a84cb 100644 +index 5d66dca4c..7f60e238f 100644 --- a/website/docs/r/compute_instance_template.html.markdown +++ b/website/docs/r/compute_instance_template.html.markdown @@ -183,12 +183,11 @@ resource "google_compute_instance_template" "foobar" { @@ -868,7 +868,7 @@ index cd97bf440..6b71a84cb 100644 Template and can then update the Instance Group manager without conflict before destroying the previous Instance Template. -@@ -228,17 +227,17 @@ destroying the previous Instance Template. +@@ -269,17 +268,17 @@ resource "google_compute_instance_template" "confidential_instance_template" { A common way to use instance templates and managed instance groups is to deploy the latest image in a family, usually the latest build of your application. There are two @@ -892,7 +892,7 @@ index cd97bf440..6b71a84cb 100644 the template to use that specific image: ```tf -@@ -291,8 +290,9 @@ The following arguments are supported: +@@ -332,8 +331,9 @@ The following arguments are supported: To create a machine with a [custom type](https://cloud.google.com/dataproc/docs/concepts/compute/custom-machine-types) (such as extended memory), format the value like `custom-VCPUS-MEM_IN_MB` like `custom-6-20480` for 6 vCPU and 20GB of RAM. - - - @@ -903,7 +903,7 @@ index cd97bf440..6b71a84cb 100644 * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. Max length is 54 characters. -@@ -510,7 +510,7 @@ The following arguments are supported: +@@ -551,7 +551,7 @@ The following arguments are supported: * `access_config` - (Optional) Access configurations, i.e. IPs via which this instance can be accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet (this means that ssh provisioners will @@ -1145,7 +1145,7 @@ index db7f57628..dceb98ac2 100644 ```hcl diff --git a/website/docs/r/compute_region_instance_group_manager.html.markdown b/website/docs/r/compute_region_instance_group_manager.html.markdown -index e0074ea64..2448154fd 100644 +index cdc4e9963..0f0c5cb65 100644 --- a/website/docs/r/compute_region_instance_group_manager.html.markdown +++ b/website/docs/r/compute_region_instance_group_manager.html.markdown @@ -166,7 +166,7 @@ The following arguments are supported: @@ -1595,7 +1595,7 @@ index f18c91533..924ad4cc0 100644
diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown -index b11194f59..865b675aa 100644 +index a6e83f152..2bc957a4f 100644 --- a/website/docs/r/container_cluster.html.markdown +++ b/website/docs/r/container_cluster.html.markdown @@ -13,15 +13,12 @@ To get more information about GKE clusters, see: @@ -1759,7 +1759,7 @@ index b11194f59..865b675aa 100644 The `ephemeral_storage_config` block supports: * `local_ssd_count` (Required) - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. -@@ -1171,7 +1199,7 @@ for more details. This field only applies to private clusters, when +@@ -1180,7 +1208,7 @@ for more details. This field only applies to private clusters, when * `private_endpoint_subnetwork` - (Optional) Subnetwork in cluster's network where master's endpoint will be provisioned. * `master_global_access_config` (Optional) - Controls cluster master global @@ -1768,7 +1768,7 @@ index b11194f59..865b675aa 100644 not modify the previously-set value. Structure is [documented below](#nested_master_global_access_config). In addition, the `private_cluster_config` allows access to the following read-only fields: -@@ -1273,9 +1301,9 @@ Enables monitoring and attestation of the boot integrity of the instance. The at +@@ -1282,9 +1310,9 @@ Enables monitoring and attestation of the boot integrity of the instance. The at * `mode` (Required) How to expose the node metadata to the workload running on the node. Accepted values are: @@ -2691,10 +2691,10 @@ index 1e419d9e4..16c9465d2 100644 ## Example Usage diff --git a/website/docs/r/google_project.html.markdown b/website/docs/r/google_project.html.markdown -index 6d45074bb..81d97bb62 100644 +index fea020389..e8792e772 100644 --- a/website/docs/r/google_project.html.markdown +++ b/website/docs/r/google_project.html.markdown -@@ -11,15 +11,13 @@ Allows creation and management of a Google Cloud Platform project. +@@ -11,7 +11,7 @@ Allows creation and management of a Google Cloud Platform project. Projects created with this resource must be associated with an Organization. See the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details. @@ -2703,15 +2703,7 @@ index 6d45074bb..81d97bb62 100644 resource must have `roles/resourcemanager.projectCreator` on the specified organization. See the [Access Control for Organizations Using IAM](https://cloud.google.com/resource-manager/docs/access-control-org) doc for more information. - - ~> This resource reads the specified billing account on every terraform apply and plan operation so you must have permissions on the specified billing account. - --~> It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. -- - To get more information about projects, see: - - * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) -@@ -73,8 +71,8 @@ The following arguments are supported: +@@ -86,8 +86,8 @@ The following arguments are supported: project to be migrated to the newly specified folder. 
* `billing_account` - (Optional) The alphanumeric ID of the billing account this project @@ -3827,7 +3819,7 @@ index 368f1e7fd..57ee39c9d 100644 Only the arguments listed above are exposed as attributes. diff --git a/website/docs/r/tpu_node.html.markdown b/website/docs/r/tpu_node.html.markdown -index fa6b38f47..751e0a8fb 100644 +index 3420dc7dc..9af308f7c 100644 --- a/website/docs/r/tpu_node.html.markdown +++ b/website/docs/r/tpu_node.html.markdown @@ -66,7 +66,7 @@ resource "google_tpu_node" "tpu" { @@ -3837,5 +3829,5 @@ index fa6b38f47..751e0a8fb 100644 - description = "Terraform Google Provider test TPU" + description = "Google Provider test TPU" use_service_networking = true - network = google_service_networking_connection.private_service_connection.network + network = google_service_networking_connection.private_service_connection.network diff --git a/patches/0009-Bucket-Skip-default-labels-for-Import-and-Create.patch b/patches/0009-Bucket-Skip-default-labels-for-Import-and-Create.patch index 44d3da934c..9e7ee02fed 100644 --- a/patches/0009-Bucket-Skip-default-labels-for-Import-and-Create.patch +++ b/patches/0009-Bucket-Skip-default-labels-for-Import-and-Create.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Bucket: Skip default labels for Import and Create diff --git a/google-beta/services/storage/resource_storage_bucket.go b/google-beta/services/storage/resource_storage_bucket.go -index e4be7399d..8fbeee2ab 100644 +index 2cbd3ab79..22c461cf6 100644 --- a/google-beta/services/storage/resource_storage_bucket.go +++ b/google-beta/services/storage/resource_storage_bucket.go -@@ -1858,10 +1858,10 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res +@@ -1859,10 +1859,10 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res if err := d.Set("lifecycle_rule", flattenBucketLifecycle(d, res.Lifecycle)); err != nil { return fmt.Errorf("Error setting lifecycle_rule: %s", err) } diff --git a/patches/0010-Rename-default-provisioning-label-to-goog-pulumi-pro.patch b/patches/0010-Rename-default-provisioning-label-to-goog-pulumi-pro.patch index 0d7eb2f3b8..628e9588ee 100644 --- a/patches/0010-Rename-default-provisioning-label-to-goog-pulumi-pro.patch +++ b/patches/0010-Rename-default-provisioning-label-to-goog-pulumi-pro.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Rename default provisioning label to goog-pulumi-provisioned diff --git a/google-beta/transport/config.go b/google-beta/transport/config.go -index 2c93a76fb..13baca8ef 100644 +index 0cad3b9d7..db75b183d 100644 --- a/google-beta/transport/config.go +++ b/google-beta/transport/config.go -@@ -642,7 +642,7 @@ var DefaultClientScopes = []string{ +@@ -643,7 +643,7 @@ var DefaultClientScopes = []string{ "https://www.googleapis.com/auth/userinfo.email", } diff --git a/provider/cmd/pulumi-resource-gcp/bridge-metadata.json b/provider/cmd/pulumi-resource-gcp/bridge-metadata.json index 5b1613ed9f..37117cfa06 100644 --- a/provider/cmd/pulumi-resource-gcp/bridge-metadata.json +++ b/provider/cmd/pulumi-resource-gcp/bridge-metadata.json @@ -1057,6 +1057,9 @@ }, "secondary_config": { "maxItemsOne": true + }, + "trial_metadata": { + "maxItemsOne": false } } }, @@ -2097,6 +2100,10 @@ } } }, + "google_backup_dr_backup_vault": { + "current": "gcp:backupdisasterrecovery/backupVault:BackupVault", + "majorVersion": 8 + }, "google_backup_dr_management_server": { "current": "gcp:backupdisasterrecovery/managementServer:ManagementServer", "majorVersion": 8, @@ -2180,7 +2187,22 @@ }, 
"google_bigquery_analytics_hub_data_exchange": { "current": "gcp:bigqueryanalyticshub/dataExchange:DataExchange", - "majorVersion": 8 + "majorVersion": 8, + "fields": { + "sharing_environment_config": { + "maxItemsOne": true, + "elem": { + "fields": { + "dcr_exchange_config": { + "maxItemsOne": true + }, + "default_exchange_config": { + "maxItemsOne": true + } + } + } + } + } }, "google_bigquery_analytics_hub_data_exchange_iam_binding": { "current": "gcp:bigqueryanalyticshub/dataExchangeIamBinding:DataExchangeIamBinding", @@ -2212,7 +2234,14 @@ "majorVersion": 8, "fields": { "bigquery_dataset": { - "maxItemsOne": true + "maxItemsOne": true, + "elem": { + "fields": { + "selected_resources": { + "maxItemsOne": false + } + } + } }, "categories": { "maxItemsOne": false @@ -2346,6 +2375,9 @@ "email_preferences": { "maxItemsOne": true }, + "encryption_configuration": { + "maxItemsOne": true + }, "schedule_options": { "maxItemsOne": true }, @@ -3137,6 +3169,9 @@ } } }, + "san_dnsnames": { + "maxItemsOne": false + }, "self_managed": { "maxItemsOne": true } @@ -3835,6 +3870,9 @@ "scaling": { "maxItemsOne": true }, + "service_mesh": { + "maxItemsOne": true + }, "volumes": { "maxItemsOne": false, "elem": { @@ -3969,6 +4007,42 @@ "app_engine_routing_override": { "maxItemsOne": true }, + "http_target": { + "maxItemsOne": true, + "elem": { + "fields": { + "header_overrides": { + "maxItemsOne": false, + "elem": { + "fields": { + "header": { + "maxItemsOne": true + } + } + } + }, + "oauth_token": { + "maxItemsOne": true + }, + "oidc_token": { + "maxItemsOne": true + }, + "uri_override": { + "maxItemsOne": true, + "elem": { + "fields": { + "path_override": { + "maxItemsOne": true + }, + "query_override": { + "maxItemsOne": true + } + } + } + } + } + } + }, "rate_limits": { "maxItemsOne": true }, @@ -6569,6 +6643,9 @@ "current": "gcp:compute/nodeTemplate:NodeTemplate", "majorVersion": 8, "fields": { + "accelerators": { + "maxItemsOne": false + }, "node_type_flexibility": { "maxItemsOne": true }, @@ -10236,6 +10313,9 @@ } } } + }, + "node_kubelet_config": { + "maxItemsOne": true } } } @@ -11829,6 +11909,29 @@ } } } + }, + "tag_resources": { + "maxItemsOne": true, + "elem": { + "fields": { + "profile_generations_to_tag": { + "maxItemsOne": false + }, + "tag_conditions": { + "maxItemsOne": false, + "elem": { + "fields": { + "sensitivity_score": { + "maxItemsOne": true + }, + "tag": { + "maxItemsOne": true + } + } + } + } + } + } } } } @@ -11875,6 +11978,9 @@ "maxItemsOne": true, "elem": { "fields": { + "inspect_template_modified_cadence": { + "maxItemsOne": true + }, "schema_modified_cadence": { "maxItemsOne": true, "elem": { @@ -12008,6 +12114,9 @@ "maxItemsOne": true, "elem": { "fields": { + "inspect_template_modified_cadence": { + "maxItemsOne": true + }, "schema_modified_cadence": { "maxItemsOne": true, "elem": { @@ -15058,6 +15167,9 @@ "maxItemsOne": true, "elem": { "fields": { + "change_tables": { + "maxItemsOne": true + }, "exclude_objects": { "maxItemsOne": true, "elem": { @@ -15105,6 +15217,9 @@ } } } + }, + "transaction_logs": { + "maxItemsOne": true } } } @@ -15936,6 +16051,16 @@ "maxItemsOne": true, "elem": { "fields": { + "chunking_config": { + "maxItemsOne": true, + "elem": { + "fields": { + "layout_based_chunking_config": { + "maxItemsOne": true + } + } + } + }, "default_parsing_config": { "maxItemsOne": true, "elem": { @@ -15943,6 +16068,9 @@ "digital_parsing_config": { "maxItemsOne": true }, + "layout_parsing_config": { + "maxItemsOne": true + }, "ocr_parsing_config": { "maxItemsOne": true } 
@@ -15956,6 +16084,9 @@ "digital_parsing_config": { "maxItemsOne": true }, + "layout_parsing_config": { + "maxItemsOne": true + }, "ocr_parsing_config": { "maxItemsOne": true } @@ -18839,6 +18970,26 @@ }, "saml": { "maxItemsOne": true + }, + "x509": { + "maxItemsOne": true, + "elem": { + "fields": { + "trust_store": { + "maxItemsOne": true, + "elem": { + "fields": { + "intermediate_cas": { + "maxItemsOne": false + }, + "trust_anchors": { + "maxItemsOne": false + } + } + } + } + } + } } } }, @@ -20795,6 +20946,9 @@ "fields": { "exclude_export_ranges": { "maxItemsOne": false + }, + "include_export_ranges": { + "maxItemsOne": false } } } @@ -23908,6 +24062,26 @@ } } }, + "maintenance_policy": { + "maxItemsOne": true, + "elem": { + "fields": { + "weekly_maintenance_window": { + "maxItemsOne": false, + "elem": { + "fields": { + "start_time": { + "maxItemsOne": true + } + } + } + } + } + } + }, + "maintenance_schedule": { + "maxItemsOne": false + }, "psc_configs": { "maxItemsOne": false }, @@ -24345,6 +24519,10 @@ } } }, + "google_scc_v2_folder_scc_big_query_export": { + "current": "gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport", + "majorVersion": 8 + }, "google_scc_v2_organization_mute_config": { "current": "gcp:securitycenter/v2OrganizationMuteConfig:V2OrganizationMuteConfig", "majorVersion": 8 @@ -24404,6 +24582,10 @@ } } }, + "google_scc_v2_project_scc_big_query_export": { + "current": "gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport", + "majorVersion": 8 + }, "google_secret_manager_secret": { "current": "gcp:secretmanager/secret:Secret", "majorVersion": 8, @@ -26891,6 +27073,41 @@ } } }, + "google_certificate_manager_certificates": { + "current": "gcp:certificatemanager/getCertificates:getCertificates", + "majorVersion": 8, + "fields": { + "certificates": { + "maxItemsOne": false, + "elem": { + "fields": { + "managed": { + "maxItemsOne": false, + "elem": { + "fields": { + "authorization_attempt_info": { + "maxItemsOne": false + }, + "dns_authorizations": { + "maxItemsOne": false + }, + "domains": { + "maxItemsOne": false + }, + "provisioning_issue": { + "maxItemsOne": false + } + } + } + }, + "san_dnsnames": { + "maxItemsOne": false + } + } + } + } + } + }, "google_client_config": { "current": "gcp:organizations/getClientConfig:getClientConfig", "majorVersion": 8 @@ -27508,6 +27725,9 @@ "scaling": { "maxItemsOne": false }, + "service_mesh": { + "maxItemsOne": false + }, "volumes": { "maxItemsOne": false, "elem": { @@ -30053,6 +30273,9 @@ } } } + }, + "node_kubelet_config": { + "maxItemsOne": false } } } @@ -30698,6 +30921,26 @@ }, "saml": { "maxItemsOne": false + }, + "x509": { + "maxItemsOne": false, + "elem": { + "fields": { + "trust_store": { + "maxItemsOne": false, + "elem": { + "fields": { + "intermediate_cas": { + "maxItemsOne": false + }, + "trust_anchors": { + "maxItemsOne": false + } + } + } + } + } + } } } }, @@ -30771,6 +31014,15 @@ "current": "gcp:kms/getCryptoKeyIamPolicy:getCryptoKeyIamPolicy", "majorVersion": 8 }, + "google_kms_crypto_key_latest_version": { + "current": "gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion", + "majorVersion": 8, + "fields": { + "public_key": { + "maxItemsOne": false + } + } + }, "google_kms_crypto_key_version": { "current": "gcp:kms/getKMSCryptoKeyVersion:getKMSCryptoKeyVersion", "majorVersion": 8, @@ -30780,6 +31032,25 @@ } } }, + "google_kms_crypto_key_versions": { + "current": "gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions", + "majorVersion": 8, + "fields": { + "public_key": { + 
"maxItemsOne": false + }, + "versions": { + "maxItemsOne": false, + "elem": { + "fields": { + "public_key": { + "maxItemsOne": false + } + } + } + } + } + }, "google_kms_crypto_keys": { "current": "gcp:kms/getCryptoKeys:getCryptoKeys", "majorVersion": 8, @@ -32269,6 +32540,10 @@ "effective_labels", "terraform_labels" ], + "google_backup_dr_backup_vault": [ + "effective_labels", + "terraform_labels" + ], "google_beyondcorp_app_connection": [ "effective_labels", "terraform_labels" @@ -32979,6 +33254,10 @@ "effective_labels", "terraform_labels" ], + "google_certificate_manager_certificates": [ + "certificates.$.effective_labels", + "certificates.$.terraform_labels" + ], "google_cloud_run_service": [ "metadata.$.effective_labels", "metadata.$.terraform_labels" @@ -33208,6 +33487,7 @@ "gcp:artifactregistry/repositoryIamPolicy:RepositoryIamPolicy": 0, "gcp:artifactregistry/vpcscConfig:VpcscConfig": 0, "gcp:assuredworkloads/workload:Workload": 0, + "gcp:backupdisasterrecovery/backupVault:BackupVault": 0, "gcp:backupdisasterrecovery/managementServer:ManagementServer": 0, "gcp:beyondcorp/appConnection:AppConnection": 0, "gcp:beyondcorp/appConnector:AppConnector": 0, @@ -33974,6 +34254,7 @@ "gcp:securitycenter/sourceIamPolicy:SourceIamPolicy": 0, "gcp:securitycenter/v2FolderMuteConfig:V2FolderMuteConfig": 0, "gcp:securitycenter/v2FolderNotificationConfig:V2FolderNotificationConfig": 0, + "gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport": 0, "gcp:securitycenter/v2OrganizationMuteConfig:V2OrganizationMuteConfig": 0, "gcp:securitycenter/v2OrganizationNotificationConfig:V2OrganizationNotificationConfig": 0, "gcp:securitycenter/v2OrganizationSccBigQueryExports:V2OrganizationSccBigQueryExports": 0, @@ -33983,6 +34264,7 @@ "gcp:securitycenter/v2OrganizationSourceIamPolicy:V2OrganizationSourceIamPolicy": 0, "gcp:securitycenter/v2ProjectMuteConfig:V2ProjectMuteConfig": 0, "gcp:securitycenter/v2ProjectNotificationConfig:V2ProjectNotificationConfig": 0, + "gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport": 0, "gcp:securityposture/posture:Posture": 0, "gcp:securityposture/postureDeployment:PostureDeployment": 0, "gcp:serviceaccount/account:Account": 0, @@ -34140,6 +34422,7 @@ "gcp:certificateauthority/getCaPoolIamPolicy:getCaPoolIamPolicy": 0, "gcp:certificateauthority/getCertificateTemplateIamPolicy:getCertificateTemplateIamPolicy": 0, "gcp:certificatemanager/getCertificateMap:getCertificateMap": 0, + "gcp:certificatemanager/getCertificates:getCertificates": 0, "gcp:cloudasset/getResourcesSearchAll:getResourcesSearchAll": 0, "gcp:cloudasset/getSearchAllResources:getSearchAllResources": 0, "gcp:cloudbuild/getTrigger:getTrigger": 0, @@ -34299,6 +34582,8 @@ "gcp:iap/getWebTypeAppEngineIamPolicy:getWebTypeAppEngineIamPolicy": 0, "gcp:iap/getWebTypeComputeIamPolicy:getWebTypeComputeIamPolicy": 0, "gcp:kms/getCryptoKeyIamPolicy:getCryptoKeyIamPolicy": 0, + "gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion": 0, + "gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions": 0, "gcp:kms/getCryptoKeys:getCryptoKeys": 0, "gcp:kms/getEkmConnectionIamPolicy:getEkmConnectionIamPolicy": 0, "gcp:kms/getKMSCryptoKey:getKMSCryptoKey": 0, diff --git a/provider/cmd/pulumi-resource-gcp/schema.json b/provider/cmd/pulumi-resource-gcp/schema.json index e7cc0c3285..b7acc73a43 100644 --- a/provider/cmd/pulumi-resource-gcp/schema.json +++ b/provider/cmd/pulumi-resource-gcp/schema.json @@ -3242,6 +3242,27 @@ "primaryClusterName" ] }, + 
"gcp:alloydb/ClusterTrialMetadata:ClusterTrialMetadata": { + "properties": { + "endTime": { + "type": "string", + "description": "End time of the trial cluster.\n" + }, + "graceEndTime": { + "type": "string", + "description": "Grace end time of the trial cluster.\n" + }, + "startTime": { + "type": "string", + "description": "Start time of the trial cluster.\n" + }, + "upgradeTime": { + "type": "string", + "description": "Upgrade time of the trial cluster to standard cluster.\n" + } + }, + "type": "object" + }, "gcp:alloydb/InstanceClientConnectionConfig:InstanceClientConnectionConfig": { "properties": { "requireConnectors": { @@ -8182,6 +8203,18 @@ "enableFailureEmail" ] }, + "gcp:bigquery/DataTransferConfigEncryptionConfiguration:DataTransferConfigEncryptionConfiguration": { + "properties": { + "kmsKeyName": { + "type": "string", + "description": "The name of the KMS key used for encrypting BigQuery data.\n" + } + }, + "type": "object", + "required": [ + "kmsKeyName" + ] + }, "gcp:bigquery/DataTransferConfigScheduleOptions:DataTransferConfigScheduleOptions": { "properties": { "disableAutoScheduling": { @@ -10191,11 +10224,41 @@ "title" ] }, + "gcp:bigqueryanalyticshub/DataExchangeSharingEnvironmentConfig:DataExchangeSharingEnvironmentConfig": { + "properties": { + "dcrExchangeConfig": { + "$ref": "#/types/gcp:bigqueryanalyticshub/DataExchangeSharingEnvironmentConfigDcrExchangeConfig:DataExchangeSharingEnvironmentConfigDcrExchangeConfig", + "description": "Data Clean Room (DCR), used for privacy-safe and secured data sharing.\n", + "willReplaceOnChanges": true + }, + "defaultExchangeConfig": { + "$ref": "#/types/gcp:bigqueryanalyticshub/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig:DataExchangeSharingEnvironmentConfigDefaultExchangeConfig", + "description": "Default Analytics Hub data exchange, used for secured data sharing.\n", + "willReplaceOnChanges": true + } + }, + "type": "object" + }, + "gcp:bigqueryanalyticshub/DataExchangeSharingEnvironmentConfigDcrExchangeConfig:DataExchangeSharingEnvironmentConfigDcrExchangeConfig": { + "type": "object" + }, + "gcp:bigqueryanalyticshub/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig:DataExchangeSharingEnvironmentConfigDefaultExchangeConfig": { + "type": "object" + }, "gcp:bigqueryanalyticshub/ListingBigqueryDataset:ListingBigqueryDataset": { "properties": { "dataset": { "type": "string", - "description": "Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123\n\n- - -\n" + "description": "Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123\n", + "willReplaceOnChanges": true + }, + "selectedResources": { + "type": "array", + "items": { + "$ref": "#/types/gcp:bigqueryanalyticshub/ListingBigqueryDatasetSelectedResource:ListingBigqueryDatasetSelectedResource" + }, + "description": "Resource in this dataset that is selectively shared. 
This field is required for data clean room exchanges.\nStructure is documented below.\n", + "willReplaceOnChanges": true } }, "type": "object", @@ -10203,6 +10266,16 @@ "dataset" ] }, + "gcp:bigqueryanalyticshub/ListingBigqueryDatasetSelectedResource:ListingBigqueryDatasetSelectedResource": { + "properties": { + "table": { + "type": "string", + "description": "Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:\"projects/test_project/datasets/test_dataset/tables/test_table\"\n\n- - -\n", + "willReplaceOnChanges": true + } + }, + "type": "object" + }, "gcp:bigqueryanalyticshub/ListingDataProvider:ListingDataProvider": { "properties": { "name": { @@ -10283,12 +10356,23 @@ "type": "boolean", "description": "If true, enable restricted export.\n" }, + "restrictDirectTableAccess": { + "type": "boolean", + "description": "(Output)\nIf true, restrict direct table access(read api/tabledata.list) on linked table.\n" + }, "restrictQueryResult": { "type": "boolean", "description": "If true, restrict export of query result derived from restricted linked dataset table.\n" } }, - "type": "object" + "type": "object", + "language": { + "nodejs": { + "requiredOutputs": [ + "restrictDirectTableAccess" + ] + } + } }, "gcp:bigquerydatapolicy/DataPolicyDataMaskingPolicy:DataPolicyDataMaskingPolicy": { "properties": { @@ -10578,6 +10662,10 @@ "family": { "type": "string", "description": "The name of the column family.\n" + }, + "type": { + "type": "string", + "description": "The type of the column family.\n" } }, "type": "object", @@ -14796,6 +14884,192 @@ } } }, + "gcp:certificatemanager/getCertificatesCertificate:getCertificatesCertificate": { + "properties": { + "description": { + "type": "string", + "description": "A human-readable description of the resource.\n" + }, + "effectiveLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "secret": true + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Set of label tags associated with the Certificate resource.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.\n" + }, + "location": { + "type": "string", + "description": "The Certificate Manager location. If not specified, \"global\" is used.\n" + }, + "manageds": { + "type": "array", + "items": { + "$ref": "#/types/gcp:certificatemanager/getCertificatesCertificateManaged:getCertificatesCertificateManaged" + }, + "description": "Configuration and state of a Managed Certificate.\nCertificate Manager provisions and renews Managed Certificates\nautomatically, for as long as it's authorized to do so.\n" + }, + "name": { + "type": "string", + "description": "A user-defined name of the certificate. Certificate names must be unique\nThe name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter,\nand all following characters must be a dash, underscore, letter or digit.\n" + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs. 
If it\nis not provided, the provider project is used.\n" + }, + "pulumiLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.\n", + "secret": true + }, + "sanDnsnames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6)\n" + }, + "scope": { + "type": "string", + "description": "The scope of the certificate.\n\nDEFAULT: Certificates with default scope are served from core Google data centers.\nIf unsure, choose this option.\n\nEDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence.\nSee https://cloud.google.com/vpc/docs/edge-locations.\n\nALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs).\nSee https://cloud.google.com/compute/docs/regions-zones\n" + } + }, + "type": "object", + "required": [ + "description", + "effectiveLabels", + "labels", + "location", + "manageds", + "name", + "project", + "sanDnsnames", + "scope", + "pulumiLabels" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:certificatemanager/getCertificatesCertificateManaged:getCertificatesCertificateManaged": { + "properties": { + "authorizationAttemptInfos": { + "type": "array", + "items": { + "$ref": "#/types/gcp:certificatemanager/getCertificatesCertificateManagedAuthorizationAttemptInfo:getCertificatesCertificateManagedAuthorizationAttemptInfo" + }, + "description": "Detailed state of the latest authorization attempt for each domain\nspecified for this Managed Certificate.\n" + }, + "dnsAuthorizations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Authorizations that will be used for performing domain authorization. 
Either issuanceConfig or dnsAuthorizations should be specificed, but not both.\n" + }, + "domains": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The domains for which a managed SSL certificate will be generated.\nWildcard domains are only supported with DNS challenge resolution\n" + }, + "issuanceConfig": { + "type": "string", + "description": "The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*.\nIf this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa.\nEither issuanceConfig or dnsAuthorizations should be specificed, but not both.\n" + }, + "provisioningIssues": { + "type": "array", + "items": { + "$ref": "#/types/gcp:certificatemanager/getCertificatesCertificateManagedProvisioningIssue:getCertificatesCertificateManagedProvisioningIssue" + }, + "description": "Information about issues with provisioning this Managed Certificate.\n" + }, + "state": { + "type": "string", + "description": "A state of this Managed Certificate.\n" + } + }, + "type": "object", + "required": [ + "authorizationAttemptInfos", + "dnsAuthorizations", + "domains", + "issuanceConfig", + "provisioningIssues", + "state" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:certificatemanager/getCertificatesCertificateManagedAuthorizationAttemptInfo:getCertificatesCertificateManagedAuthorizationAttemptInfo": { + "properties": { + "details": { + "type": "string", + "description": "Human readable explanation for reaching the state. Provided to help\naddress the configuration issues.\nNot guaranteed to be stable. For programmatic access use 'failure_reason' field.\n" + }, + "domain": { + "type": "string", + "description": "Domain name of the authorization attempt.\n" + }, + "failureReason": { + "type": "string", + "description": "Reason for failure of the authorization attempt for the domain.\n" + }, + "state": { + "type": "string", + "description": "State of the domain for managed certificate issuance.\n" + } + }, + "type": "object", + "required": [ + "details", + "domain", + "failureReason", + "state" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:certificatemanager/getCertificatesCertificateManagedProvisioningIssue:getCertificatesCertificateManagedProvisioningIssue": { + "properties": { + "details": { + "type": "string", + "description": "Human readable explanation about the issue. Provided to help address\nthe configuration issues.\nNot guaranteed to be stable. For programmatic access use 'reason' field.\n" + }, + "reason": { + "type": "string", + "description": "Reason for provisioning failures.\n" + } + }, + "type": "object", + "required": [ + "details", + "reason" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, "gcp:cloudasset/FolderFeedCondition:FolderFeedCondition": { "properties": { "description": { @@ -16038,11 +16312,11 @@ "properties": { "diskSizeGb": { "type": "integer", - "description": "Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.\n" + "description": "Size of the disk attached to the worker, in GB. 
See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size.\n" }, "machineType": { "type": "string", - "description": "Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`.\n" + "description": "Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`.\n" }, "noExternalIp": { "type": "boolean", @@ -22076,7 +22350,7 @@ }, "nfs": { "$ref": "#/types/gcp:cloudrun/ServiceTemplateSpecVolumeNfs:ServiceTemplateSpecVolumeNfs", - "description": "A filesystem backed by a Network File System share. This filesystem requires the\nrun.googleapis.com/execution-environment annotation to be set to \"gen2\" and\nrun.googleapis.com/launch-stage set to \"BETA\" or \"ALPHA\".\nStructure is documented below.\n" + "description": "A filesystem backed by a Network File System share. This filesystem requires the\nrun.googleapis.com/execution-environment annotation to be unset or set to \"gen2\"\nStructure is documented below.\n" }, "secret": { "$ref": "#/types/gcp:cloudrun/ServiceTemplateSpecVolumeSecret:ServiceTemplateSpecVolumeSecret", @@ -22092,7 +22366,7 @@ "properties": { "driver": { "type": "string", - "description": "Unique name representing the type of file system to be created. Cloud Run supports the following values:\n* gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the\nrun.googleapis.com/execution-environment annotation to be set to \"gen2\" and\nrun.googleapis.com/launch-stage set to \"BETA\" or \"ALPHA\".\n" + "description": "Unique name representing the type of file system to be created. Cloud Run supports the following values:\n* gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the\nrun.googleapis.com/execution-environment annotation to be unset or set to \"gen2\"\n" }, "readOnly": { "type": "boolean", @@ -23218,7 +23492,7 @@ "items": { "$ref": "#/types/gcp:cloudrun/getServiceTemplateSpecVolumeNf:getServiceTemplateSpecVolumeNf" }, - "description": "A filesystem backed by a Network File System share. This filesystem requires the\nrun.googleapis.com/execution-environment annotation to be set to \"gen2\" and\nrun.googleapis.com/launch-stage set to \"BETA\" or \"ALPHA\".\n" + "description": "A filesystem backed by a Network File System share. This filesystem requires the\nrun.googleapis.com/execution-environment annotation to be unset or set to \"gen2\"\n" }, "secrets": { "type": "array", @@ -23246,7 +23520,7 @@ "properties": { "driver": { "type": "string", - "description": "Unique name representing the type of file system to be created. Cloud Run supports the following values:\n * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the\n run.googleapis.com/execution-environment annotation to be set to \"gen2\" and\n run.googleapis.com/launch-stage set to \"BETA\" or \"ALPHA\".\n" + "description": "Unique name representing the type of file system to be created. Cloud Run supports the following values:\n * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. 
This driver requires the\n run.googleapis.com/execution-environment annotation to be unset or set to \"gen2\"\n" }, "readOnly": { "type": "boolean", @@ -23821,7 +24095,7 @@ }, "gcs": { "$ref": "#/types/gcp:cloudrunv2/JobTemplateTemplateVolumeGcs:JobTemplateTemplateVolumeGcs", - "description": "Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA.\nStructure is documented below.\n" + "description": "Cloud Storage bucket mounted as a volume using GCSFuse.\nStructure is documented below.\n" }, "name": { "type": "string", @@ -23829,7 +24103,7 @@ }, "nfs": { "$ref": "#/types/gcp:cloudrunv2/JobTemplateTemplateVolumeNfs:JobTemplateTemplateVolumeNfs", - "description": "NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA.\nStructure is documented below.\n" + "description": "NFS share mounted as a volume.\nStructure is documented below.\n" }, "secret": { "$ref": "#/types/gcp:cloudrunv2/JobTemplateTemplateVolumeSecret:JobTemplateTemplateVolumeSecret", @@ -24218,6 +24492,10 @@ "type": "string", "description": "Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account.\n" }, + "serviceMesh": { + "$ref": "#/types/gcp:cloudrunv2/ServiceTemplateServiceMesh:ServiceTemplateServiceMesh", + "description": "Enables Cloud Service Mesh for this Revision.\nStructure is documented below.\n" + }, "sessionAffinity": { "type": "boolean", "description": "Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity\n" @@ -24673,6 +24951,15 @@ }, "type": "object" }, + "gcp:cloudrunv2/ServiceTemplateServiceMesh:ServiceTemplateServiceMesh": { + "properties": { + "mesh": { + "type": "string", + "description": "The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh.\n\n- - -\n" + } + }, + "type": "object" + }, "gcp:cloudrunv2/ServiceTemplateVolume:ServiceTemplateVolume": { "properties": { "cloudSqlInstance": { @@ -24685,7 +24972,7 @@ }, "gcs": { "$ref": "#/types/gcp:cloudrunv2/ServiceTemplateVolumeGcs:ServiceTemplateVolumeGcs", - "description": "Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA.\nStructure is documented below.\n" + "description": "Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment.\nStructure is documented below.\n" }, "name": { "type": "string", @@ -24754,7 +25041,7 @@ }, "readOnly": { "type": "boolean", - "description": "If true, mount the NFS volume as read only\n\n- - -\n" + "description": "If true, mount the NFS volume as read only\n" }, "server": { "type": "string", @@ -25425,7 +25712,7 @@ "items": { "$ref": "#/types/gcp:cloudrunv2/getJobTemplateTemplateVolumeGc:getJobTemplateTemplateVolumeGc" }, - "description": "Cloud Storage bucket mounted as a volume using GCSFuse. 
This feature requires the launch stage to be set to ALPHA or BETA.\n" + "description": "Cloud Storage bucket mounted as a volume using GCSFuse.\n" }, "name": { "type": "string", @@ -25436,7 +25723,7 @@ "items": { "$ref": "#/types/gcp:cloudrunv2/getJobTemplateTemplateVolumeNf:getJobTemplateTemplateVolumeNf" }, - "description": "NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA.\n" + "description": "NFS share mounted as a volume.\n" }, "secrets": { "type": "array", @@ -25867,6 +26154,13 @@ "type": "string", "description": "Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account.\n" }, + "serviceMeshes": { + "type": "array", + "items": { + "$ref": "#/types/gcp:cloudrunv2/getServiceTemplateServiceMesh:getServiceTemplateServiceMesh" + }, + "description": "Enables Cloud Service Mesh for this Revision.\n" + }, "sessionAffinity": { "type": "boolean", "description": "Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity\n" @@ -25901,6 +26195,7 @@ "revision", "scalings", "serviceAccount", + "serviceMeshes", "sessionAffinity", "timeout", "volumes", @@ -26473,6 +26768,23 @@ } } }, + "gcp:cloudrunv2/getServiceTemplateServiceMesh:getServiceTemplateServiceMesh": { + "properties": { + "mesh": { + "type": "string", + "description": "The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh.\n" + } + }, + "type": "object", + "required": [ + "mesh" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, "gcp:cloudrunv2/getServiceTemplateVolume:getServiceTemplateVolume": { "properties": { "cloudSqlInstances": { @@ -26494,7 +26806,7 @@ "items": { "$ref": "#/types/gcp:cloudrunv2/getServiceTemplateVolumeGc:getServiceTemplateVolumeGc" }, - "description": "Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA.\n" + "description": "Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment.\n" }, "name": { "type": "string", @@ -27061,6 +27373,187 @@ } } }, + "gcp:cloudtasks/QueueHttpTarget:QueueHttpTarget": { + "properties": { + "headerOverrides": { + "type": "array", + "items": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTargetHeaderOverride:QueueHttpTargetHeaderOverride" + }, + "description": "HTTP target headers.\nThis map contains the header field names and values.\nHeaders will be set when running the CreateTask and/or BufferTask.\nThese headers represent a subset of the headers that will be configured for the task's HTTP request.\nSome HTTP request headers will be ignored or replaced.\nHeaders which can have multiple values (according to RFC2616) can be specified using comma-separated values.\nThe size of the headers must be less than 80KB. 
Queue-level headers to override headers of all the tasks in the queue.\nStructure is documented below.\n" + }, + "httpMethod": { + "type": "string", + "description": "The HTTP method to use for the request.\nWhen specified, it overrides HttpRequest for the task.\nNote that if the value is set to GET the body of the task will be ignored at execution time.\nPossible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`.\n" + }, + "oauthToken": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTargetOauthToken:QueueHttpTargetOauthToken", + "description": "If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request.\nThis type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com.\nNote that both the service account email and the scope MUST be specified when using the queue-level authorization override.\nStructure is documented below.\n" + }, + "oidcToken": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTargetOidcToken:QueueHttpTargetOidcToken", + "description": "If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request.\nThis type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself.\nNote that both the service account email and the audience MUST be specified when using the queue-level authorization override.\nStructure is documented below.\n" + }, + "uriOverride": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTargetUriOverride:QueueHttpTargetUriOverride", + "description": "URI override.\nWhen specified, overrides the execution URI for all the tasks in the queue.\nStructure is documented below.\n" + } + }, + "type": "object", + "language": { + "nodejs": { + "requiredOutputs": [ + "httpMethod" + ] + } + } + }, + "gcp:cloudtasks/QueueHttpTargetHeaderOverride:QueueHttpTargetHeaderOverride": { + "properties": { + "header": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTargetHeaderOverrideHeader:QueueHttpTargetHeaderOverrideHeader", + "description": "Header embodying a key and a value.\nStructure is documented below.\n" + } + }, + "type": "object", + "required": [ + "header" + ] + }, + "gcp:cloudtasks/QueueHttpTargetHeaderOverrideHeader:QueueHttpTargetHeaderOverrideHeader": { + "properties": { + "key": { + "type": "string", + "description": "The Key of the header.\n" + }, + "value": { + "type": "string", + "description": "The Value of the header.\n" + } + }, + "type": "object", + "required": [ + "key", + "value" + ] + }, + "gcp:cloudtasks/QueueHttpTargetOauthToken:QueueHttpTargetOauthToken": { + "properties": { + "scope": { + "type": "string", + "description": "OAuth scope to be used for generating OAuth access token.\nIf not specified, \"https://www.googleapis.com/auth/cloud-platform\" will be used.\n" + }, + "serviceAccountEmail": { + "type": "string", + "description": "Service account email to be used for generating OAuth token.\nThe service account must be within the same project as the queue.\nThe caller must have iam.serviceAccounts.actAs permission for the service account.\n" + } + }, + "type": "object", + "required": [ + "serviceAccountEmail" + ], + "language": { + "nodejs": { + "requiredOutputs": [ + "scope", + "serviceAccountEmail" + ] + } + } + }, + "gcp:cloudtasks/QueueHttpTargetOidcToken:QueueHttpTargetOidcToken": { + "properties": { + "audience": { + "type": "string", + "description": "Audience to be used when generating OIDC token. 
If not specified, the URI specified in target will be used.\n" + }, + "serviceAccountEmail": { + "type": "string", + "description": "Service account email to be used for generating OIDC token.\nThe service account must be within the same project as the queue.\nThe caller must have iam.serviceAccounts.actAs permission for the service account.\n" + } + }, + "type": "object", + "required": [ + "serviceAccountEmail" + ], + "language": { + "nodejs": { + "requiredOutputs": [ + "audience", + "serviceAccountEmail" + ] + } + } + }, + "gcp:cloudtasks/QueueHttpTargetUriOverride:QueueHttpTargetUriOverride": { + "properties": { + "host": { + "type": "string", + "description": "Host override.\nWhen specified, replaces the host part of the task URL.\nFor example, if the task URL is \"https://www.google.com\", and host value\nis set to \"example.net\", the overridden URI will be changed to \"https://example.net\".\nHost value cannot be an empty string (INVALID_ARGUMENT).\n" + }, + "pathOverride": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTargetUriOverridePathOverride:QueueHttpTargetUriOverridePathOverride", + "description": "URI path.\nWhen specified, replaces the existing path of the task URL.\nSetting the path value to an empty string clears the URI path segment.\nStructure is documented below.\n" + }, + "port": { + "type": "string", + "description": "Port override.\nWhen specified, replaces the port part of the task URI.\nFor instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo.\nNote that the port value must be a positive integer.\nSetting the port to 0 (Zero) clears the URI port.\n" + }, + "queryOverride": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTargetUriOverrideQueryOverride:QueueHttpTargetUriOverrideQueryOverride", + "description": "URI query.\nWhen specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment.\nStructure is documented below.\n" + }, + "scheme": { + "type": "string", + "description": "Scheme override.\nWhen specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS).\nPossible values are: `HTTP`, `HTTPS`.\n" + }, + "uriOverrideEnforceMode": { + "type": "string", + "description": "URI Override Enforce Mode\nWhen specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS.\nPossible values are: `ALWAYS`, `IF_NOT_EXISTS`.\n" + } + }, + "type": "object", + "language": { + "nodejs": { + "requiredOutputs": [ + "scheme", + "uriOverrideEnforceMode" + ] + } + } + }, + "gcp:cloudtasks/QueueHttpTargetUriOverridePathOverride:QueueHttpTargetUriOverridePathOverride": { + "properties": { + "path": { + "type": "string", + "description": "The URI path (e.g., /users/1234). Default is an empty string.\n" + } + }, + "type": "object", + "language": { + "nodejs": { + "requiredOutputs": [ + "path" + ] + } + } + }, + "gcp:cloudtasks/QueueHttpTargetUriOverrideQueryOverride:QueueHttpTargetUriOverrideQueryOverride": { + "properties": { + "queryParams": { + "type": "string", + "description": "The query parameters (e.g., qparam1=123\u0026qparam2=456). Default is an empty string.\n" + } + }, + "type": "object", + "language": { + "nodejs": { + "requiredOutputs": [ + "queryParams" + ] + } + } + }, "gcp:cloudtasks/QueueIamBindingCondition:QueueIamBindingCondition": { "properties": { "description": { @@ -29961,7 +30454,7 @@ "items": { "type": "string" }, - "description": "An optional list of ports to which this rule applies. 
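
The `gcp:cloudtasks/QueueHttpTarget*` types above describe the new queue-level HTTP target overrides (URI, headers, and OAuth/OIDC tokens). A hedged sketch of a queue using them, assuming the Queue resource exposes the block as `httpTarget`; the host, path, and service account email are placeholders.

import * as gcp from "@pulumi/gcp";

// Illustrative only: route every task in the queue to a fixed endpoint and
// authenticate with an OIDC token, using the new httpTarget block.
const queue = new gcp.cloudtasks.Queue("override-queue", {
    location: "us-central1", // placeholder region
    httpTarget: {
        httpMethod: "POST",
        uriOverride: {
            scheme: "HTTPS",
            host: "worker.example.net", // replaces the host part of each task URL
            pathOverride: { path: "/tasks/handle" },
            uriOverrideEnforceMode: "ALWAYS",
        },
        oidcToken: {
            // Must be a service account in the same project as the queue.
            serviceAccountEmail: "tasks-invoker@my-project.iam.gserviceaccount.com",
        },
        headerOverrides: [{
            header: { key: "X-Queue-Name", value: "override-queue" },
        }],
    },
});

export const queueName = queue.name;
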
This field\nis only applicable for UDP or TCP protocol. Each entry must be\neither an integer or a range. If not specified, this rule\napplies to connections through any port.\nExample inputs include: [\"22\"], [\"80\",\"443\"], and\n[\"12345-12349\"].\n" + "description": "An optional list of ports to which this rule applies. This field\nis only applicable for UDP or TCP protocol. Each entry must be\neither an integer or a range. If not specified, this rule\napplies to connections through any port.\nExample inputs include: [22], [80, 443], and\n[\"12345-12349\"].\n" }, "protocol": { "type": "string", @@ -29980,7 +30473,7 @@ "items": { "type": "string" }, - "description": "An optional list of ports to which this rule applies. This field\nis only applicable for UDP or TCP protocol. Each entry must be\neither an integer or a range. If not specified, this rule\napplies to connections through any port.\nExample inputs include: [\"22\"], [\"80\",\"443\"], and\n[\"12345-12349\"].\n" + "description": "An optional list of ports to which this rule applies. This field\nis only applicable for UDP or TCP protocol. Each entry must be\neither an integer or a range. If not specified, this rule\napplies to connections through any port.\nExample inputs include: [22], [80, 443], and\n[\"12345-12349\"].\n" }, "protocol": { "type": "string", @@ -30592,6 +31085,10 @@ "description": "Parameters for a new disk that will be created\nalongside the new instance. Either `initialize_params` or `source` must be set.\nStructure is documented below.\n", "willReplaceOnChanges": true }, + "interface": { + "type": "string", + "description": "The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.)\n" + }, "kmsKeySelfLink": { "type": "string", "description": "The self_link of the encryption key that is\nstored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link`\nand `disk_encryption_key_raw` may be set.\n", @@ -30800,6 +31297,10 @@ "description": "Parameters with which a disk was created alongside the instance.\n", "willReplaceOnChanges": true }, + "interface": { + "type": "string", + "description": "The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.)\n" + }, "kmsKeySelfLink": { "type": "string", "description": "The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.\n", @@ -30825,6 +31326,7 @@ "diskEncryptionKeyRaw", "diskEncryptionKeySha256", "initializeParams", + "interface", "kmsKeySelfLink", "mode", "source" @@ -31573,6 +32075,10 @@ "description": "Parameters with which a disk was created alongside the instance.\n", "willReplaceOnChanges": true }, + "interface": { + "type": "string", + "description": "The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.)\n" + }, "kmsKeySelfLink": { "type": "string", "description": "The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. 
Only one of kms_key_self_link and disk_encryption_key_raw may be set.\n", @@ -31598,6 +32104,7 @@ "diskEncryptionKeyRaw", "diskEncryptionKeySha256", "initializeParams", + "interface", "kmsKeySelfLink", "mode", "source" @@ -32755,7 +33262,7 @@ }, "subnetworkProject": { "type": "string", - "description": "The project in which the subnetwork belongs.\nIf the `subnetwork` is a self_link, this field is ignored in favor of the project\ndefined in the subnetwork self_link. If the `subnetwork` is a name and this\nfield is not provided, the provider project is used.\n" + "description": "The project in which the subnetwork belongs.\nIf the `subnetwork` is a self_link, this field is set to the project\ndefined in the subnetwork self_link. If the `subnetwork` is a name and this\nfield is not provided, the provider project is used.\n" } }, "type": "object", @@ -34355,6 +34862,21 @@ "projectId" ] }, + "gcp:compute/NodeTemplateAccelerator:NodeTemplateAccelerator": { + "properties": { + "acceleratorCount": { + "type": "integer", + "description": "The number of the guest accelerator cards exposed to this\nnode template.\n", + "willReplaceOnChanges": true + }, + "acceleratorType": { + "type": "string", + "description": "Full or partial URL of the accelerator type resource to expose\nto this node template.\n", + "willReplaceOnChanges": true + } + }, + "type": "object" + }, "gcp:compute/NodeTemplateNodeTypeFlexibility:NodeTemplateNodeTypeFlexibility": { "properties": { "cpus": { @@ -44933,6 +45455,10 @@ }, "description": "Parameters with which a disk was created alongside the instance.\nStructure is documented below.\n" }, + "interface": { + "type": "string", + "description": "The disk interface used for attaching this disk. One of `SCSI` or `NVME`.\n" + }, "kmsKeySelfLink": { "type": "string", "description": "The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.\n" @@ -44953,6 +45479,7 @@ "diskEncryptionKeyRaw", "diskEncryptionKeySha256", "initializeParams", + "interface", "kmsKeySelfLink", "mode", "source" @@ -52245,8 +52772,7 @@ }, "gcfsConfig": { "$ref": "#/types/gcp:container/ClusterNodeConfigGcfsConfig:ClusterNodeConfigGcfsConfig", - "description": "Parameters for the Google Container Filesystem (GCFS).\nIf unspecified, GCFS will not be enabled on the node pool. When enabling this feature you must specify `image_type = \"COS_CONTAINERD\"` and `node_version` from GKE versions 1.19 or later to use it.\nFor GKE versions 1.19, 1.20, and 1.21, the recommended minimum `node_version` would be 1.19.15-gke.1300, 1.20.11-gke.1300, and 1.21.5-gke.1300 respectively.\nA `machine_type` that has more than 16 GiB of memory is also recommended.\nGCFS must be enabled in order to use [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming).\nStructure is documented below.\n\n", - "willReplaceOnChanges": true + "description": "Parameters for the Google Container Filesystem (GCFS).\nIf unspecified, GCFS will not be enabled on the node pool. 
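
Two compute additions appear in this stretch of the schema: an `interface` field on instance boot disks and the `gcp:compute/NodeTemplateAccelerator` type for sole-tenant node templates. A sketch of both, assuming the node template takes the accelerator list as `accelerators`; the region, node type, accelerator type, zone, and image are placeholders, and the schema's caveat that `interface` is only for specific cases still applies.

import * as gcp from "@pulumi/gcp";

// Illustrative only: a sole-tenant node template exposing GPUs through the
// new accelerators block.
const nodeTemplate = new gcp.compute.NodeTemplate("gpu-node-template", {
    region: "us-central1", // placeholder region
    nodeType: "n1-node-96-624",
    accelerators: [{
        acceleratorCount: 4,
        acceleratorType: "nvidia-tesla-t4", // full or partial accelerator type URL
    }],
});

// The new boot disk `interface` field (SCSI or NVME); per the schema it is
// shared with attached_disk and intended only for specific cases.
const vm = new gcp.compute.Instance("example-vm", {
    zone: "us-central1-a", // placeholder zone
    machineType: "e2-medium",
    bootDisk: {
        initializeParams: { image: "debian-cloud/debian-12" },
        interface: "NVME",
    },
    networkInterfaces: [{ network: "default" }],
});
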
When enabling this feature you must specify `image_type = \"COS_CONTAINERD\"` and `node_version` from GKE versions 1.19 or later to use it.\nFor GKE versions 1.19, 1.20, and 1.21, the recommended minimum `node_version` would be 1.19.15-gke.1300, 1.20.11-gke.1300, and 1.21.5-gke.1300 respectively.\nA `machine_type` that has more than 16 GiB of memory is also recommended.\nGCFS must be enabled in order to use [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming).\nStructure is documented below.\n\n" }, "guestAccelerators": { "type": "array", @@ -52591,8 +53117,7 @@ "properties": { "enabled": { "type": "boolean", - "description": "Whether or not the Google Container Filesystem (GCFS) is enabled\n", - "willReplaceOnChanges": true + "description": "Whether or not the Google Container Filesystem (GCFS) is enabled\n" } }, "type": "object", @@ -52715,6 +53240,10 @@ "type": "string", "description": "The CPU management policy on the node. See\n[K8S CPU Management Policies](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/).\nOne of `\"none\"` or `\"static\"`. Defaults to `none` when `kubelet_config` is unset.\n" }, + "insecureKubeletReadonlyPortEnabled": { + "type": "string", + "description": "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.\n" + }, "podPidsLimit": { "type": "integer", "description": "Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.\n" @@ -52723,7 +53252,15 @@ "type": "object", "required": [ "cpuManagerPolicy" - ] + ], + "language": { + "nodejs": { + "requiredOutputs": [ + "cpuManagerPolicy", + "insecureKubeletReadonlyPortEnabled" + ] + } + } }, "gcp:container/ClusterNodeConfigLinuxNodeConfig:ClusterNodeConfigLinuxNodeConfig": { "properties": { @@ -53017,7 +53554,11 @@ "properties": { "networkTags": { "$ref": "#/types/gcp:container/ClusterNodePoolAutoConfigNetworkTags:ClusterNodePoolAutoConfigNetworkTags", - "description": "The network tag config for the cluster's automatically provisioned node pools.\n" + "description": "The network tag config for the cluster's automatically provisioned node pools. Structure is documented below.\n" + }, + "nodeKubeletConfig": { + "$ref": "#/types/gcp:container/ClusterNodePoolAutoConfigNodeKubeletConfig:ClusterNodePoolAutoConfigNodeKubeletConfig", + "description": "Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here.\nStructure is documented below.\n" }, "resourceManagerTags": { "type": "object", @@ -53041,6 +53582,22 @@ }, "type": "object" }, + "gcp:container/ClusterNodePoolAutoConfigNodeKubeletConfig:ClusterNodePoolAutoConfigNodeKubeletConfig": { + "properties": { + "insecureKubeletReadonlyPortEnabled": { + "type": "string", + "description": "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. 
Possible values: `TRUE`, `FALSE`.\n" + } + }, + "type": "object", + "language": { + "nodejs": { + "requiredOutputs": [ + "insecureKubeletReadonlyPortEnabled" + ] + } + } + }, "gcp:container/ClusterNodePoolAutoscaling:ClusterNodePoolAutoscaling": { "properties": { "locationPolicy": { @@ -53092,6 +53649,10 @@ "$ref": "#/types/gcp:container/ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig:ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig", "description": "The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below.\n" }, + "insecureKubeletReadonlyPortEnabled": { + "type": "string", + "description": "Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.\n" + }, "loggingVariant": { "type": "string", "description": "The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information.\n" @@ -53101,6 +53662,7 @@ "language": { "nodejs": { "requiredOutputs": [ + "insecureKubeletReadonlyPortEnabled", "loggingVariant" ] } @@ -53373,8 +53935,7 @@ }, "gcfsConfig": { "$ref": "#/types/gcp:container/ClusterNodePoolNodeConfigGcfsConfig:ClusterNodePoolNodeConfigGcfsConfig", - "description": "Parameters for the Google Container Filesystem (GCFS).\nIf unspecified, GCFS will not be enabled on the node pool. When enabling this feature you must specify `image_type = \"COS_CONTAINERD\"` and `node_version` from GKE versions 1.19 or later to use it.\nFor GKE versions 1.19, 1.20, and 1.21, the recommended minimum `node_version` would be 1.19.15-gke.1300, 1.20.11-gke.1300, and 1.21.5-gke.1300 respectively.\nA `machine_type` that has more than 16 GiB of memory is also recommended.\nGCFS must be enabled in order to use [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming).\nStructure is documented below.\n\n", - "willReplaceOnChanges": true + "description": "Parameters for the Google Container Filesystem (GCFS).\nIf unspecified, GCFS will not be enabled on the node pool. When enabling this feature you must specify `image_type = \"COS_CONTAINERD\"` and `node_version` from GKE versions 1.19 or later to use it.\nFor GKE versions 1.19, 1.20, and 1.21, the recommended minimum `node_version` would be 1.19.15-gke.1300, 1.20.11-gke.1300, and 1.21.5-gke.1300 respectively.\nA `machine_type` that has more than 16 GiB of memory is also recommended.\nGCFS must be enabled in order to use [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming).\nStructure is documented below.\n\n" }, "guestAccelerators": { "type": "array", @@ -53716,8 +54277,7 @@ "properties": { "enabled": { "type": "boolean", - "description": "Whether or not the Google Container Filesystem (GCFS) is enabled\n", - "willReplaceOnChanges": true + "description": "Whether or not the Google Container Filesystem (GCFS) is enabled\n" } }, "type": "object", @@ -53840,6 +54400,10 @@ "type": "string", "description": "The CPU management policy on the node. 
See\n[K8S CPU Management Policies](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/).\nOne of `\"none\"` or `\"static\"`. Defaults to `none` when `kubelet_config` is unset.\n" }, + "insecureKubeletReadonlyPortEnabled": { + "type": "string", + "description": "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.\n" + }, "podPidsLimit": { "type": "integer", "description": "Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.\n" @@ -53848,7 +54412,15 @@ "type": "object", "required": [ "cpuManagerPolicy" - ] + ], + "language": { + "nodejs": { + "requiredOutputs": [ + "cpuManagerPolicy", + "insecureKubeletReadonlyPortEnabled" + ] + } + } }, "gcp:container/ClusterNodePoolNodeConfigLinuxNodeConfig:ClusterNodePoolNodeConfigLinuxNodeConfig": { "properties": { @@ -54683,8 +55255,7 @@ }, "gcfsConfig": { "$ref": "#/types/gcp:container/NodePoolNodeConfigGcfsConfig:NodePoolNodeConfigGcfsConfig", - "description": "GCFS configuration for this node.\n", - "willReplaceOnChanges": true + "description": "GCFS configuration for this node.\n" }, "guestAccelerators": { "type": "array", @@ -55026,8 +55597,7 @@ "properties": { "enabled": { "type": "boolean", - "description": "Whether or not GCFS is enabled\n", - "willReplaceOnChanges": true + "description": "Whether or not GCFS is enabled\n" } }, "type": "object", @@ -55150,6 +55720,10 @@ "type": "string", "description": "Control the CPU management policy on the node.\n" }, + "insecureKubeletReadonlyPortEnabled": { + "type": "string", + "description": "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.\n" + }, "podPidsLimit": { "type": "integer", "description": "Controls the maximum number of processes allowed to run in a pod.\n" @@ -55158,7 +55732,15 @@ "type": "object", "required": [ "cpuManagerPolicy" - ] + ], + "language": { + "nodejs": { + "requiredOutputs": [ + "cpuManagerPolicy", + "insecureKubeletReadonlyPortEnabled" + ] + } + } }, "gcp:container/NodePoolNodeConfigLinuxNodeConfig:NodePoolNodeConfigLinuxNodeConfig": { "properties": { @@ -57523,6 +58105,10 @@ "type": "string", "description": "Control the CPU management policy on the node.\n" }, + "insecureKubeletReadonlyPortEnabled": { + "type": "string", + "description": "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. 
Possible values: `TRUE`, `FALSE`.\n" + }, "podPidsLimit": { "type": "integer", "description": "Controls the maximum number of processes allowed to run in a pod.\n" @@ -57533,6 +58119,7 @@ "cpuCfsQuota", "cpuCfsQuotaPeriod", "cpuManagerPolicy", + "insecureKubeletReadonlyPortEnabled", "podPidsLimit" ], "language": { @@ -57898,6 +58485,13 @@ }, "description": "Collection of Compute Engine network tags that can be applied to a node's underlying VM instance.\n" }, + "nodeKubeletConfigs": { + "type": "array", + "items": { + "$ref": "#/types/gcp:container/getClusterNodePoolAutoConfigNodeKubeletConfig:getClusterNodePoolAutoConfigNodeKubeletConfig" + }, + "description": "Node kubelet configs.\n" + }, "resourceManagerTags": { "type": "object", "additionalProperties": { @@ -57909,6 +58503,7 @@ "type": "object", "required": [ "networkTags", + "nodeKubeletConfigs", "resourceManagerTags" ], "language": { @@ -57937,6 +58532,23 @@ } } }, + "gcp:container/getClusterNodePoolAutoConfigNodeKubeletConfig:getClusterNodePoolAutoConfigNodeKubeletConfig": { + "properties": { + "insecureKubeletReadonlyPortEnabled": { + "type": "string", + "description": "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.\n" + } + }, + "type": "object", + "required": [ + "insecureKubeletReadonlyPortEnabled" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, "gcp:container/getClusterNodePoolAutoscaling:getClusterNodePoolAutoscaling": { "properties": { "locationPolicy": { @@ -58010,6 +58622,10 @@ }, "description": "GCFS configuration for this node.\n" }, + "insecureKubeletReadonlyPortEnabled": { + "type": "string", + "description": "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.\n" + }, "loggingVariant": { "type": "string", "description": "Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.\n" @@ -58019,6 +58635,7 @@ "required": [ "containerdConfigs", "gcfsConfigs", + "insecureKubeletReadonlyPortEnabled", "loggingVariant" ], "language": { @@ -58949,6 +59566,10 @@ "type": "string", "description": "Control the CPU management policy on the node.\n" }, + "insecureKubeletReadonlyPortEnabled": { + "type": "string", + "description": "Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.\n" + }, "podPidsLimit": { "type": "integer", "description": "Controls the maximum number of processes allowed to run in a pod.\n" @@ -58959,6 +59580,7 @@ "cpuCfsQuota", "cpuCfsQuotaPeriod", "cpuManagerPolicy", + "insecureKubeletReadonlyPortEnabled", "podPidsLimit" ], "language": { @@ -60188,11 +60810,11 @@ }, "host": { "type": "string", - "description": "Required. The IP or hostname of the source MySQL database.\n" + "description": "The IP or hostname of the source MySQL database.\n" }, "password": { "type": "string", - "description": "Required. Input only. The password for the user that Database Migration Service will be using to connect to the database.\nThis field is not returned on request, and the value is encrypted when stored in Database Migration Service.\n**Note**: This property is sensitive and will not be displayed in the plan.\n", + "description": "Input only. 
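
The container types above add an `insecureKubeletReadonlyPortEnabled` field (string "TRUE"/"FALSE") to the kubelet configs, plus a `nodePoolAutoConfig.nodeKubeletConfig` block for automatically provisioned node pools. A sketch showing where the new fields sit on a cluster; this is not a validated cluster configuration (combining `nodeConfig` and `nodePoolAutoConfig` may not apply to every cluster type), and the location is a placeholder.

import * as gcp from "@pulumi/gcp";

// Illustrative only: disable the kubelet read-only port on the default node
// config and for automatically provisioned node pools.
const cluster = new gcp.container.Cluster("hardened-cluster", {
    location: "us-central1", // placeholder region
    initialNodeCount: 1,
    nodeConfig: {
        kubeletConfig: {
            cpuManagerPolicy: "none", // still required alongside the new field
            insecureKubeletReadonlyPortEnabled: "FALSE",
        },
    },
    nodePoolAutoConfig: {
        nodeKubeletConfig: {
            insecureKubeletReadonlyPortEnabled: "FALSE",
        },
    },
});
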
The password for the user that Database Migration Service will be using to connect to the database.\nThis field is not returned on request, and the value is encrypted when stored in Database Migration Service.\n**Note**: This property is sensitive and will not be displayed in the plan.\n", "secret": true, "willReplaceOnChanges": true }, @@ -60202,7 +60824,7 @@ }, "port": { "type": "integer", - "description": "Required. The network port of the source MySQL database.\n" + "description": "The network port of the source MySQL database.\n" }, "ssl": { "$ref": "#/types/gcp:databasemigrationservice/ConnectionProfileMysqlSsl:ConnectionProfileMysqlSsl", @@ -60210,24 +60832,14 @@ }, "username": { "type": "string", - "description": "Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.\n" + "description": "The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.\n" } }, "type": "object", - "required": [ - "host", - "password", - "port", - "username" - ], "language": { "nodejs": { "requiredOutputs": [ - "host", - "password", - "passwordSet", - "port", - "username" + "passwordSet" ] } } @@ -60425,13 +61037,17 @@ }, "gcp:databasemigrationservice/ConnectionProfilePostgresql:ConnectionProfilePostgresql": { "properties": { + "alloydbClusterId": { + "type": "string", + "description": "If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID.\n" + }, "cloudSqlId": { "type": "string", "description": "If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source.\n" }, "host": { "type": "string", - "description": "Required. The IP or hostname of the source MySQL database.\n" + "description": "The IP or hostname of the source MySQL database.\n" }, "networkArchitecture": { "type": "string", @@ -60439,7 +61055,7 @@ }, "password": { "type": "string", - "description": "Required. Input only. The password for the user that Database Migration Service will be using to connect to the database.\nThis field is not returned on request, and the value is encrypted when stored in Database Migration Service.\n**Note**: This property is sensitive and will not be displayed in the plan.\n", + "description": "Input only. The password for the user that Database Migration Service will be using to connect to the database.\nThis field is not returned on request, and the value is encrypted when stored in Database Migration Service.\n**Note**: This property is sensitive and will not be displayed in the plan.\n", "secret": true, "willReplaceOnChanges": true }, @@ -60449,7 +61065,7 @@ }, "port": { "type": "integer", - "description": "Required. The network port of the source MySQL database.\n" + "description": "The network port of the source MySQL database.\n" }, "ssl": { "$ref": "#/types/gcp:databasemigrationservice/ConnectionProfilePostgresqlSsl:ConnectionProfilePostgresqlSsl", @@ -60457,25 +61073,15 @@ }, "username": { "type": "string", - "description": "Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.\n" + "description": "The username that Database Migration Service will use to connect to the database. 
The value is encrypted when stored in Database Migration Service.\n" } }, "type": "object", - "required": [ - "host", - "password", - "port", - "username" - ], "language": { "nodejs": { "requiredOutputs": [ - "host", "networkArchitecture", - "password", - "passwordSet", - "port", - "username" + "passwordSet" ] } } @@ -65234,6 +65840,10 @@ "pubSubNotification": { "$ref": "#/types/gcp:dataloss/PreventionDiscoveryConfigActionPubSubNotification:PreventionDiscoveryConfigActionPubSubNotification", "description": "Publish a message into the Pub/Sub topic.\nStructure is documented below.\n" + }, + "tagResources": { + "$ref": "#/types/gcp:dataloss/PreventionDiscoveryConfigActionTagResources:PreventionDiscoveryConfigActionTagResources", + "description": "Publish a message into the Pub/Sub topic.\nStructure is documented below.\n" } }, "type": "object" @@ -65323,6 +65933,63 @@ }, "type": "object" }, + "gcp:dataloss/PreventionDiscoveryConfigActionTagResources:PreventionDiscoveryConfigActionTagResources": { + "properties": { + "lowerDataRiskToLow": { + "type": "boolean", + "description": "Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles.\n" + }, + "profileGenerationsToTags": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. 
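
The Database Migration Service changes above drop the required flags on several MySQL/PostgreSQL fields and add `alloydbClusterId` to the PostgreSQL connection profile. A sketch of a PostgreSQL profile pointing at an AlloyDB cluster; the region, IDs, host, and credentials are placeholders, and a real program would pass the password as a Pulumi secret.

import * as gcp from "@pulumi/gcp";

// Illustrative only: a connection profile for a PostgreSQL source that is an
// AlloyDB instance, using the new alloydbClusterId field.
const profile = new gcp.databasemigrationservice.ConnectionProfile("alloydb-source", {
    location: "us-central1", // placeholder region
    connectionProfileId: "alloydb-source",
    postgresql: {
        alloydbClusterId: "my-alloydb-cluster", // placeholder cluster ID
        host: "10.0.0.12",                      // placeholder private IP
        port: 5432,
        username: "migration-user",
        password: "replace-with-a-secret",      // use pulumi.secret(...) in real code
    },
});
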
To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`.\nEach value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`.\n" + }, + "tagConditions": { + "type": "array", + "items": { + "$ref": "#/types/gcp:dataloss/PreventionDiscoveryConfigActionTagResourcesTagCondition:PreventionDiscoveryConfigActionTagResourcesTagCondition" + }, + "description": "The tags to associate with different conditions.\nStructure is documented below.\n" + } + }, + "type": "object" + }, + "gcp:dataloss/PreventionDiscoveryConfigActionTagResourcesTagCondition:PreventionDiscoveryConfigActionTagResourcesTagCondition": { + "properties": { + "sensitivityScore": { + "$ref": "#/types/gcp:dataloss/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore:PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore", + "description": "Conditions attaching the tag to a resource on its profile having this sensitivity score.\nStructure is documented below.\n" + }, + "tag": { + "$ref": "#/types/gcp:dataloss/PreventionDiscoveryConfigActionTagResourcesTagConditionTag:PreventionDiscoveryConfigActionTagResourcesTagConditionTag", + "description": "The tag value to attach to resources.\nStructure is documented below.\n" + } + }, + "type": "object" + }, + "gcp:dataloss/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore:PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore": { + "properties": { + "score": { + "type": "string", + "description": "The sensitivity score applied to the resource.\nPossible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`.\n" + } + }, + "type": "object", + "required": [ + "score" + ] + }, + "gcp:dataloss/PreventionDiscoveryConfigActionTagResourcesTagConditionTag:PreventionDiscoveryConfigActionTagResourcesTagConditionTag": { + "properties": { + "namespacedValue": { + "type": "string", + "description": "The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, \"123456/environment/prod\".\n" + } + }, + "type": "object" + }, "gcp:dataloss/PreventionDiscoveryConfigError:PreventionDiscoveryConfigError": { "properties": { "details": { @@ -65429,6 +66096,10 @@ }, "gcp:dataloss/PreventionDiscoveryConfigTargetBigQueryTargetCadence:PreventionDiscoveryConfigTargetBigQueryTargetCadence": { "properties": { + "inspectTemplateModifiedCadence": { + "$ref": "#/types/gcp:dataloss/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence:PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence", + "description": "Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. 
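
The `PreventionDiscoveryConfigActionTagResources` types above let a discovery config attach tag values to profiled resources based on sensitivity (even though the top-level `tagResources` description still reads like the neighbouring Pub/Sub action, the nested condition types make the intent clear). A sketch of such an action; the parent, tag value, and BigQuery target block are placeholders, with the target included only to make the example self-contained.

import * as gcp from "@pulumi/gcp";

// Illustrative only: tag newly profiled BigQuery resources with an
// environment tag when their sensitivity is high.
const discovery = new gcp.dataloss.PreventionDiscoveryConfig("tagging-config", {
    parent: "projects/my-project/locations/us", // placeholder parent
    location: "us",
    targets: [{
        bigQueryTarget: { filter: { otherTables: {} } },
    }],
    actions: [{
        tagResources: {
            lowerDataRiskToLow: true,
            profileGenerationsToTags: ["PROFILE_GENERATION_NEW"],
            tagConditions: [{
                sensitivityScore: { score: "SENSITIVITY_HIGH" },
                tag: { namespacedValue: "123456/environment/prod" },
            }],
        },
    }],
});
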
If not set, changing the template will not cause a data profile to update.\nStructure is documented below.\n" + }, "schemaModifiedCadence": { "$ref": "#/types/gcp:dataloss/PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence:PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence", "description": "Governs when to update data profiles when a schema is modified\nStructure is documented below.\n" @@ -65440,6 +66111,15 @@ }, "type": "object" }, + "gcp:dataloss/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence:PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence": { + "properties": { + "frequency": { + "type": "string", + "description": "How frequently data profiles can be updated when the template is modified. Defaults to never.\nPossible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`.\n" + } + }, + "type": "object" + }, "gcp:dataloss/PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence:PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence": { "properties": { "frequency": { @@ -65733,6 +66413,10 @@ }, "gcp:dataloss/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence:PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence": { "properties": { + "inspectTemplateModifiedCadence": { + "$ref": "#/types/gcp:dataloss/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence:PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence", + "description": "Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update.\nStructure is documented below.\n" + }, "refreshFrequency": { "type": "string", "description": "Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never.\nPossible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`.\n" @@ -65744,6 +66428,18 @@ }, "type": "object" }, + "gcp:dataloss/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence:PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence": { + "properties": { + "frequency": { + "type": "string", + "description": "How frequently data profiles can be updated when the template is modified. Defaults to never.\nPossible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`.\n" + } + }, + "type": "object", + "required": [ + "frequency" + ] + }, "gcp:dataloss/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence:PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence": { "properties": { "frequency": { @@ -72777,7 +73473,7 @@ }, "stagingBucket": { "type": "string", - "description": "A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).\n", + "description": "A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).\n", "willReplaceOnChanges": true }, "tempBucket": { @@ -72858,7 +73554,7 @@ "additionalProperties": { "type": "string" }, - "description": "The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).\n", + "description": "The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)).\n", "willReplaceOnChanges": true }, "network": { @@ -72909,7 +73605,7 @@ "items": { "type": "string" }, - "description": "The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).\n", + "description": "The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)).\n", "willReplaceOnChanges": true }, "zone": { @@ -73018,7 +73714,7 @@ }, "executionTimeout": { "type": "string", - "description": "Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.\n", + "description": "Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.\n", "willReplaceOnChanges": true } }, @@ -73028,22 +73724,22 @@ "properties": { "autoDeleteTime": { "type": "string", - "description": "The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).\n", + "description": "The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n", "willReplaceOnChanges": true }, "autoDeleteTtl": { "type": "string", - "description": "The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. 
Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).\n", + "description": "The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n", "willReplaceOnChanges": true }, "idleDeleteTtl": { "type": "string", - "description": "The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json).\n", + "description": "The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json).\n", "willReplaceOnChanges": true }, "idleStartTime": { "type": "string", - "description": "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)).\n" + "description": "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)).\n" } }, "type": "object", @@ -73100,7 +73796,7 @@ }, "minCpuPlatform": { "type": "string", - "description": "Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n", + "description": "Specifies the minimum cpu platform for the Instance Group. 
See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).\n", "willReplaceOnChanges": true }, "numInstances": { @@ -75552,6 +76248,10 @@ }, "gcp:datastream/StreamSourceConfigSqlServerSourceConfig:StreamSourceConfigSqlServerSourceConfig": { "properties": { + "changeTables": { + "$ref": "#/types/gcp:datastream/StreamSourceConfigSqlServerSourceConfigChangeTables:StreamSourceConfigSqlServerSourceConfigChangeTables", + "description": "CDC reader reads from change tables.\n" + }, "excludeObjects": { "$ref": "#/types/gcp:datastream/StreamSourceConfigSqlServerSourceConfigExcludeObjects:StreamSourceConfigSqlServerSourceConfigExcludeObjects", "description": "SQL Server objects to exclude from the stream.\nStructure is documented below.\n" @@ -75567,6 +76267,10 @@ "maxConcurrentCdcTasks": { "type": "integer", "description": "Max concurrent CDC tasks.\n" + }, + "transactionLogs": { + "$ref": "#/types/gcp:datastream/StreamSourceConfigSqlServerSourceConfigTransactionLogs:StreamSourceConfigSqlServerSourceConfigTransactionLogs", + "description": "CDC reader reads from transaction logs.\n" } }, "type": "object", @@ -75579,6 +76283,9 @@ } } }, + "gcp:datastream/StreamSourceConfigSqlServerSourceConfigChangeTables:StreamSourceConfigSqlServerSourceConfigChangeTables": { + "type": "object" + }, "gcp:datastream/StreamSourceConfigSqlServerSourceConfigExcludeObjects:StreamSourceConfigSqlServerSourceConfigExcludeObjects": { "properties": { "schemas": { @@ -75783,6 +76490,9 @@ } } }, + "gcp:datastream/StreamSourceConfigSqlServerSourceConfigTransactionLogs:StreamSourceConfigSqlServerSourceConfigTransactionLogs": { + "type": "object" + }, "gcp:deploymentmanager/DeploymentLabel:DeploymentLabel": { "properties": { "key": { @@ -78440,6 +79150,10 @@ }, "gcp:discoveryengine/DataStoreDocumentProcessingConfig:DataStoreDocumentProcessingConfig": { "properties": { + "chunkingConfig": { + "$ref": "#/types/gcp:discoveryengine/DataStoreDocumentProcessingConfigChunkingConfig:DataStoreDocumentProcessingConfigChunkingConfig", + "description": "Whether chunking mode is enabled.\nStructure is documented below.\n" + }, "defaultParsingConfig": { "$ref": "#/types/gcp:discoveryengine/DataStoreDocumentProcessingConfigDefaultParsingConfig:DataStoreDocumentProcessingConfigDefaultParsingConfig", "description": "Configurations for default Document parser. If not specified, this resource\nwill be configured to use a default DigitalParsingConfig, and the default parsing\nconfig will be applied to all file types for Document parsing.\nStructure is documented below.\n" @@ -78465,12 +79179,38 @@ } } }, + "gcp:discoveryengine/DataStoreDocumentProcessingConfigChunkingConfig:DataStoreDocumentProcessingConfigChunkingConfig": { + "properties": { + "layoutBasedChunkingConfig": { + "$ref": "#/types/gcp:discoveryengine/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig:DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig", + "description": "Configuration for the layout based chunking.\nStructure is documented below.\n" + } + }, + "type": "object" + }, + "gcp:discoveryengine/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig:DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig": { + "properties": { + "chunkSize": { + "type": "integer", + "description": "The token size limit for each chunk.\nSupported values: 100-500 (inclusive). 
Default value: 500.\n" + }, + "includeAncestorHeadings": { + "type": "boolean", + "description": "Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss.\nDefault value: False.\n" + } + }, + "type": "object" + }, "gcp:discoveryengine/DataStoreDocumentProcessingConfigDefaultParsingConfig:DataStoreDocumentProcessingConfigDefaultParsingConfig": { "properties": { "digitalParsingConfig": { "$ref": "#/types/gcp:discoveryengine/DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig:DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig", "description": "Configurations applied to digital parser.\n" }, + "layoutParsingConfig": { + "$ref": "#/types/gcp:discoveryengine/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig:DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig", + "description": "Configurations applied to layout parser.\n" + }, "ocrParsingConfig": { "$ref": "#/types/gcp:discoveryengine/DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig:DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig", "description": "Configurations applied to OCR parser. Currently it only applies to PDFs.\nStructure is documented below.\n" @@ -78481,6 +79221,9 @@ "gcp:discoveryengine/DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig:DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig": { "type": "object" }, + "gcp:discoveryengine/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig:DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig": { + "type": "object" + }, "gcp:discoveryengine/DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig:DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig": { "properties": { "useNativeText": { @@ -78500,6 +79243,10 @@ "type": "string", "description": "The identifier for this object. Format specified above.\n" }, + "layoutParsingConfig": { + "$ref": "#/types/gcp:discoveryengine/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig:DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig", + "description": "Configurations applied to layout parser.\n" + }, "ocrParsingConfig": { "$ref": "#/types/gcp:discoveryengine/DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig:DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig", "description": "Configurations applied to OCR parser. Currently it only applies to PDFs.\nStructure is documented below.\n" @@ -78513,6 +79260,9 @@ "gcp:discoveryengine/DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig:DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig": { "type": "object" }, + "gcp:discoveryengine/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig:DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig": { + "type": "object" + }, "gcp:discoveryengine/DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig:DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig": { "properties": { "useNativeText": { @@ -82971,7 +83721,7 @@ "properties": { "binauthz": { "$ref": "#/types/gcp:gkehub/FeatureMembershipConfigmanagementBinauthz:FeatureMembershipConfigmanagementBinauthz", - "description": "Binauthz configuration for the cluster. 
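
The `DataStoreDocumentProcessingConfigChunkingConfig` types above add layout-based chunking (`chunkSize`, `includeAncestorHeadings`) alongside a new layout parsing config. A sketch of a Discovery Engine data store using them; the IDs, display name, and vertical are placeholders.

import * as gcp from "@pulumi/gcp";

// Illustrative only: parse documents with the layout parser and chunk them by
// layout, per the new documentProcessingConfig.chunkingConfig block.
const store = new gcp.discoveryengine.DataStore("layout-chunked-store", {
    location: "global",
    dataStoreId: "layout-chunked-store",
    displayName: "Layout chunked store",
    industryVertical: "GENERIC",
    contentConfig: "CONTENT_REQUIRED",
    documentProcessingConfig: {
        defaultParsingConfig: {
            layoutParsingConfig: {}, // empty object type in the schema
        },
        chunkingConfig: {
            layoutBasedChunkingConfig: {
                chunkSize: 300,               // 100-500 tokens per chunk
                includeAncestorHeadings: true,
            },
        },
    },
});
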
Structure is documented below.\n" + "description": "(Optional, Deprecated)\nBinauthz configuration for the cluster. Structure is documented below.\nThis field will be ignored and should not be set.\n" }, "configSync": { "$ref": "#/types/gcp:gkehub/FeatureMembershipConfigmanagementConfigSync:FeatureMembershipConfigmanagementConfigSync", @@ -82979,7 +83729,7 @@ }, "hierarchyController": { "$ref": "#/types/gcp:gkehub/FeatureMembershipConfigmanagementHierarchyController:FeatureMembershipConfigmanagementHierarchyController", - "description": "Hierarchy Controller configuration for the cluster. Structure is documented below.\n" + "description": "Hierarchy Controller configuration for the cluster. Structure is documented below.\nConfiguring Hierarchy Controller through the configmanagement feature is no longer recommended.\nUse open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead.\nFollow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller)\nto migrate from Hierarchy Controller to HNC.\n" }, "management": { "type": "string", @@ -82987,7 +83737,7 @@ }, "policyController": { "$ref": "#/types/gcp:gkehub/FeatureMembershipConfigmanagementPolicyController:FeatureMembershipConfigmanagementPolicyController", - "description": "Policy Controller configuration for the cluster. Structure is documented below.\n" + "description": "Policy Controller configuration for the cluster. Structure is documented below.\nConfiguring Policy Controller through the configmanagement feature is no longer recommended.\nUse the policycontroller feature instead.\n" }, "version": { "type": "string", @@ -87246,7 +87996,7 @@ "properties": { "idpMetadataXml": { "type": "string", - "description": "SAML Identity provider configuration metadata xml doc.\n" + "description": "SAML Identity provider configuration metadata xml doc.\n\n\u003ca name=\"nested_x509\"\u003e\u003c/a\u003eThe `x509` block supports:\n" } }, "type": "object", @@ -87254,6 +88004,58 @@ "idpMetadataXml" ] }, + "gcp:iam/WorkloadIdentityPoolProviderX509:WorkloadIdentityPoolProviderX509": { + "properties": { + "trustStore": { + "$ref": "#/types/gcp:iam/WorkloadIdentityPoolProviderX509TrustStore:WorkloadIdentityPoolProviderX509TrustStore", + "description": "A Trust store, use this trust store as a wrapper to config the trust\nanchor and optional intermediate cas to help build the trust chain for\nthe incoming end entity certificate. Follow the x509 guidelines to\ndefine those PEM encoded certs. 
Only 1 trust store is currently\nsupported.\n" + } + }, + "type": "object", + "required": [ + "trustStore" + ] + }, + "gcp:iam/WorkloadIdentityPoolProviderX509TrustStore:WorkloadIdentityPoolProviderX509TrustStore": { + "properties": { + "intermediateCas": { + "type": "array", + "items": { + "$ref": "#/types/gcp:iam/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa:WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa" + }, + "description": "Set of intermediate CA certificates used for building the trust chain to\ntrust anchor.\nIMPORTANT: Intermediate CAs are only supported when configuring x509 federation.\nStructure is documented below.\n" + }, + "trustAnchors": { + "type": "array", + "items": { + "$ref": "#/types/gcp:iam/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor:WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor" + }, + "description": "List of Trust Anchors to be used while performing validation\nagainst a given TrustStore. The incoming end entity's certificate\nmust be chained up to one of the trust anchors here.\nStructure is documented below.\n" + } + }, + "type": "object", + "required": [ + "trustAnchors" + ] + }, + "gcp:iam/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa:WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa": { + "properties": { + "pemCertificate": { + "type": "string", + "description": "PEM certificate of the PKI used for validation. Must only contain one\nca certificate(either root or intermediate cert).\n" + } + }, + "type": "object" + }, + "gcp:iam/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor:WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor": { + "properties": { + "pemCertificate": { + "type": "string", + "description": "PEM certificate of the PKI used for validation. Must only contain one\nca certificate(either root or intermediate cert).\n" + } + }, + "type": "object" + }, "gcp:iam/getTestablePermissionsPermission:getTestablePermissionsPermission": { "properties": { "apiDisabled": { @@ -87355,6 +88157,88 @@ } } }, + "gcp:iam/getWorkloadIdentityPoolProviderX509:getWorkloadIdentityPoolProviderX509": { + "properties": { + "trustStores": { + "type": "array", + "items": { + "$ref": "#/types/gcp:iam/getWorkloadIdentityPoolProviderX509TrustStore:getWorkloadIdentityPoolProviderX509TrustStore" + }, + "description": "A Trust store, use this trust store as a wrapper to config the trust\nanchor and optional intermediate cas to help build the trust chain for\nthe incoming end entity certificate. Follow the x509 guidelines to\ndefine those PEM encoded certs. 
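
The `WorkloadIdentityPoolProviderX509*` types above add X.509 federation backed by a trust store of PEM trust anchors (and optional intermediate CAs). A sketch of a provider configured this way, assuming the provider exposes the block as `x509`; the pool and provider IDs, the attribute mapping, and the PEM constant are placeholders.

import * as gcp from "@pulumi/gcp";

// Placeholder PEM material; a real program would read this from a file or
// configuration rather than hard-coding it.
const trustAnchorPem = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n";

const pool = new gcp.iam.WorkloadIdentityPool("x509-pool", {
    workloadIdentityPoolId: "x509-pool",
});

// Illustrative only: an X.509-federated provider using the new x509.trustStore block.
const provider = new gcp.iam.WorkloadIdentityPoolProvider("x509-provider", {
    workloadIdentityPoolId: pool.workloadIdentityPoolId,
    workloadIdentityPoolProviderId: "x509-provider",
    attributeMapping: {
        "google.subject": "assertion.subject.dn.cn", // assumed mapping for the cert subject CN
    },
    x509: {
        trustStore: {
            trustAnchors: [{ pemCertificate: trustAnchorPem }],
        },
    },
});
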
Only 1 trust store is currently\nsupported.\n" + } + }, + "type": "object", + "required": [ + "trustStores" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:iam/getWorkloadIdentityPoolProviderX509TrustStore:getWorkloadIdentityPoolProviderX509TrustStore": { + "properties": { + "intermediateCas": { + "type": "array", + "items": { + "$ref": "#/types/gcp:iam/getWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa:getWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa" + }, + "description": "Set of intermediate CA certificates used for building the trust chain to\ntrust anchor.\nIMPORTANT: Intermediate CAs are only supported when configuring x509 federation.\n" + }, + "trustAnchors": { + "type": "array", + "items": { + "$ref": "#/types/gcp:iam/getWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor:getWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor" + }, + "description": "List of Trust Anchors to be used while performing validation\nagainst a given TrustStore. The incoming end entity's certificate\nmust be chained up to one of the trust anchors here.\n" + } + }, + "type": "object", + "required": [ + "intermediateCas", + "trustAnchors" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:iam/getWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa:getWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa": { + "properties": { + "pemCertificate": { + "type": "string", + "description": "PEM certificate of the PKI used for validation. Must only contain one\nca certificate(either root or intermediate cert).\n" + } + }, + "type": "object", + "required": [ + "pemCertificate" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:iam/getWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor:getWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor": { + "properties": { + "pemCertificate": { + "type": "string", + "description": "PEM certificate of the PKI used for validation. 
Must only contain one\nca certificate(either root or intermediate cert).\n" + } + }, + "type": "object", + "required": [ + "pemCertificate" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, "gcp:iap/AppEngineServiceIamBindingCondition:AppEngineServiceIamBindingCondition": { "properties": { "description": { @@ -89701,122 +90585,21 @@ } } }, - "gcp:kms/getCryptoKeysKey:getCryptoKeysKey": { + "gcp:kms/getCryptoKeyLatestVersionPublicKey:getCryptoKeyLatestVersionPublicKey": { "properties": { - "cryptoKeyBackend": { - "type": "string", - "description": "The resource name of the backend environment associated with all CryptoKeyVersions within this CryptoKey.\nThe resource name is in the format \"projects/*/locations/*/ekmConnections/*\" and only applies to \"EXTERNAL_VPC\" keys.\n" - }, - "destroyScheduledDuration": { - "type": "string", - "description": "The period of time that versions of this key spend in the DESTROY_SCHEDULED state before transitioning to DESTROYED.\nIf not specified at creation time, the default duration is 30 days.\n" - }, - "effectiveLabels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "secret": true - }, - "id": { - "type": "string" - }, - "importOnly": { - "type": "boolean", - "description": "Whether this key may contain imported versions only.\n" - }, - "keyAccessJustificationsPolicies": { - "type": "array", - "items": { - "$ref": "#/types/gcp:kms/getCryptoKeysKeyKeyAccessJustificationsPolicy:getCryptoKeysKeyKeyAccessJustificationsPolicy" - }, - "description": "The policy used for Key Access Justifications Policy Enforcement. If this\nfield is present and this key is enrolled in Key Access Justifications\nPolicy Enforcement, the policy will be evaluated in encrypt, decrypt, and\nsign operations, and the operation will fail if rejected by the policy. The\npolicy is defined by specifying zero or more allowed justification codes.\nhttps://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes\nBy default, this field is absent, and all justification codes are allowed.\nThis field is currently in beta and is subject to change.\n" - }, - "keyRing": { - "type": "string", - "description": "The key ring that the keys belongs to. Format: 'projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}'.,\n" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "Labels with user-defined metadata to apply to this resource.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.\n" - }, - "name": { - "type": "string", - "description": "The resource name for the CryptoKey.\n" - }, - "primaries": { - "type": "array", - "items": { - "$ref": "#/types/gcp:kms/getCryptoKeysKeyPrimary:getCryptoKeysKeyPrimary" - }, - "description": "A copy of the primary CryptoKeyVersion that will be used by cryptoKeys.encrypt when this CryptoKey is given in EncryptRequest.name.\nKeys with purpose ENCRYPT_DECRYPT may have a primary. 
For other keys, this field will be unset.\n" - }, - "pulumiLabels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.\n", - "secret": true - }, - "purpose": { + "algorithm": { "type": "string", - "description": "The immutable purpose of this CryptoKey. See the\n[purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose)\nfor possible inputs.\nDefault value is \"ENCRYPT_DECRYPT\".\n" + "description": "The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.\n" }, - "rotationPeriod": { + "pem": { "type": "string", - "description": "Every time this period passes, generate a new CryptoKeyVersion and set it as the primary.\nThe first rotation will take place after the specified period. The rotation period has\nthe format of a decimal number with up to 9 fractional digits, followed by the\nletter 's' (seconds). It must be greater than a day (ie, 86400).\n" - }, - "skipInitialVersionCreation": { - "type": "boolean", - "description": "If set to true, the request will create a CryptoKey without any CryptoKeyVersions.\nYou must use the 'google_kms_crypto_key_version' resource to create a new CryptoKeyVersion\nor 'google_kms_key_ring_import_job' resource to import the CryptoKeyVersion.\n" - }, - "versionTemplates": { - "type": "array", - "items": { - "$ref": "#/types/gcp:kms/getCryptoKeysKeyVersionTemplate:getCryptoKeysKeyVersionTemplate" - }, - "description": "A template describing settings for new crypto key versions.\n" - } - }, - "type": "object", - "required": [ - "cryptoKeyBackend", - "destroyScheduledDuration", - "effectiveLabels", - "id", - "importOnly", - "keyAccessJustificationsPolicies", - "labels", - "primaries", - "purpose", - "rotationPeriod", - "skipInitialVersionCreation", - "pulumiLabels", - "versionTemplates" - ], - "language": { - "nodejs": { - "requiredInputs": [] - } - } - }, - "gcp:kms/getCryptoKeysKeyKeyAccessJustificationsPolicy:getCryptoKeysKeyKeyAccessJustificationsPolicy": { - "properties": { - "allowedAccessReasons": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The list of allowed reasons for access to this CryptoKey. Zero allowed\naccess reasons means all encrypt, decrypt, and sign operations for\nthis CryptoKey will fail.\n" + "description": "The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info.\n" } }, "type": "object", "required": [ - "allowedAccessReasons" + "algorithm", + "pem" ], "language": { "nodejs": { @@ -89824,21 +90607,21 @@ } } }, - "gcp:kms/getCryptoKeysKeyPrimary:getCryptoKeysKeyPrimary": { + "gcp:kms/getCryptoKeyVersionsPublicKey:getCryptoKeyVersionsPublicKey": { "properties": { - "name": { + "algorithm": { "type": "string", - "description": "The resource name for this CryptoKeyVersion.\n" + "description": "The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.\n" }, - "state": { + "pem": { "type": "string", - "description": "The current state of the CryptoKeyVersion.\n" + "description": "The public key, encoded in PEM format. 
For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info.\n" } }, "type": "object", "required": [ - "name", - "state" + "algorithm", + "pem" ], "language": { "nodejs": { @@ -89846,63 +90629,48 @@ } } }, - "gcp:kms/getCryptoKeysKeyVersionTemplate:getCryptoKeysKeyVersionTemplate": { + "gcp:kms/getCryptoKeyVersionsVersion:getCryptoKeyVersionsVersion": { "properties": { "algorithm": { "type": "string", - "description": "The algorithm to use when creating a version based on this template.\nSee the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm) for possible inputs.\n" + "description": "The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.\n" }, - "protectionLevel": { + "cryptoKey": { "type": "string", - "description": "The protection level to use when creating a version based on this template. Possible values include \"SOFTWARE\", \"HSM\", \"EXTERNAL\", \"EXTERNAL_VPC\". Defaults to \"SOFTWARE\".\n" - } - }, - "type": "object", - "required": [ - "algorithm", - "protectionLevel" - ], - "language": { - "nodejs": { - "requiredInputs": [] - } - } - }, - "gcp:kms/getKMSCryptoKeyKeyAccessJustificationsPolicy:getKMSCryptoKeyKeyAccessJustificationsPolicy": { - "properties": { - "allowedAccessReasons": { + "description": "The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the \n`gcp.kms.CryptoKey` resource/datasource.\n" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "protectionLevel": { + "type": "string" + }, + "publicKeys": { "type": "array", "items": { - "type": "string" - }, - "description": "The list of allowed reasons for access to this CryptoKey. Zero allowed\naccess reasons means all encrypt, decrypt, and sign operations for\nthis CryptoKey will fail.\n" - } - }, - "type": "object", - "required": [ - "allowedAccessReasons" - ], - "language": { - "nodejs": { - "requiredInputs": [] - } - } - }, - "gcp:kms/getKMSCryptoKeyPrimary:getKMSCryptoKeyPrimary": { - "properties": { - "name": { - "type": "string", - "description": "The CryptoKey's name.\nA CryptoKey’s name belonging to the specified Google Cloud Platform KeyRing and match the regular expression `[a-zA-Z0-9_-]{1,63}`\n" + "$ref": "#/types/gcp:kms/getCryptoKeyVersionsVersionPublicKey:getCryptoKeyVersionsVersionPublicKey" + } }, "state": { - "type": "string", - "description": "The current state of the CryptoKeyVersion.\n" + "type": "string" + }, + "version": { + "type": "integer" } }, "type": "object", "required": [ + "algorithm", + "cryptoKey", + "id", "name", - "state" + "protectionLevel", + "publicKeys", + "state", + "version" ], "language": { "nodejs": { @@ -89910,7 +90678,238 @@ } } }, - "gcp:kms/getKMSCryptoKeyVersionPublicKey:getKMSCryptoKeyVersionPublicKey": { + "gcp:kms/getCryptoKeyVersionsVersionPublicKey:getCryptoKeyVersionsVersionPublicKey": { + "properties": { + "algorithm": { + "type": "string", + "description": "The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.\n" + }, + "pem": { + "type": "string", + "description": "The public key, encoded in PEM format. 
For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info.\n" + } + }, + "type": "object", + "required": [ + "algorithm", + "pem" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:kms/getCryptoKeysKey:getCryptoKeysKey": { + "properties": { + "cryptoKeyBackend": { + "type": "string", + "description": "The resource name of the backend environment associated with all CryptoKeyVersions within this CryptoKey.\nThe resource name is in the format \"projects/*/locations/*/ekmConnections/*\" and only applies to \"EXTERNAL_VPC\" keys.\n" + }, + "destroyScheduledDuration": { + "type": "string", + "description": "The period of time that versions of this key spend in the DESTROY_SCHEDULED state before transitioning to DESTROYED.\nIf not specified at creation time, the default duration is 30 days.\n" + }, + "effectiveLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "secret": true + }, + "id": { + "type": "string" + }, + "importOnly": { + "type": "boolean", + "description": "Whether this key may contain imported versions only.\n" + }, + "keyAccessJustificationsPolicies": { + "type": "array", + "items": { + "$ref": "#/types/gcp:kms/getCryptoKeysKeyKeyAccessJustificationsPolicy:getCryptoKeysKeyKeyAccessJustificationsPolicy" + }, + "description": "The policy used for Key Access Justifications Policy Enforcement. If this\nfield is present and this key is enrolled in Key Access Justifications\nPolicy Enforcement, the policy will be evaluated in encrypt, decrypt, and\nsign operations, and the operation will fail if rejected by the policy. The\npolicy is defined by specifying zero or more allowed justification codes.\nhttps://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes\nBy default, this field is absent, and all justification codes are allowed.\nThis field is currently in beta and is subject to change.\n" + }, + "keyRing": { + "type": "string", + "description": "The key ring that the keys belongs to. Format: 'projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}'.,\n" + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Labels with user-defined metadata to apply to this resource.\n\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field 'effective_labels' for all of the labels present on the resource.\n" + }, + "name": { + "type": "string", + "description": "The resource name for the CryptoKey.\n" + }, + "primaries": { + "type": "array", + "items": { + "$ref": "#/types/gcp:kms/getCryptoKeysKeyPrimary:getCryptoKeysKeyPrimary" + }, + "description": "A copy of the primary CryptoKeyVersion that will be used by cryptoKeys.encrypt when this CryptoKey is given in EncryptRequest.name.\nKeys with purpose ENCRYPT_DECRYPT may have a primary. For other keys, this field will be unset.\n" + }, + "pulumiLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "The combination of labels configured directly on the resource\n and default labels configured on the provider.\n", + "secret": true + }, + "purpose": { + "type": "string", + "description": "The immutable purpose of this CryptoKey. 
See the\n[purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose)\nfor possible inputs.\nDefault value is \"ENCRYPT_DECRYPT\".\n" + }, + "rotationPeriod": { + "type": "string", + "description": "Every time this period passes, generate a new CryptoKeyVersion and set it as the primary.\nThe first rotation will take place after the specified period. The rotation period has\nthe format of a decimal number with up to 9 fractional digits, followed by the\nletter 's' (seconds). It must be greater than a day (ie, 86400).\n" + }, + "skipInitialVersionCreation": { + "type": "boolean", + "description": "If set to true, the request will create a CryptoKey without any CryptoKeyVersions.\nYou must use the 'google_kms_crypto_key_version' resource to create a new CryptoKeyVersion\nor 'google_kms_key_ring_import_job' resource to import the CryptoKeyVersion.\n" + }, + "versionTemplates": { + "type": "array", + "items": { + "$ref": "#/types/gcp:kms/getCryptoKeysKeyVersionTemplate:getCryptoKeysKeyVersionTemplate" + }, + "description": "A template describing settings for new crypto key versions.\n" + } + }, + "type": "object", + "required": [ + "cryptoKeyBackend", + "destroyScheduledDuration", + "effectiveLabels", + "id", + "importOnly", + "keyAccessJustificationsPolicies", + "labels", + "primaries", + "purpose", + "rotationPeriod", + "skipInitialVersionCreation", + "pulumiLabels", + "versionTemplates" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:kms/getCryptoKeysKeyKeyAccessJustificationsPolicy:getCryptoKeysKeyKeyAccessJustificationsPolicy": { + "properties": { + "allowedAccessReasons": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of allowed reasons for access to this CryptoKey. Zero allowed\naccess reasons means all encrypt, decrypt, and sign operations for\nthis CryptoKey will fail.\n" + } + }, + "type": "object", + "required": [ + "allowedAccessReasons" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:kms/getCryptoKeysKeyPrimary:getCryptoKeysKeyPrimary": { + "properties": { + "name": { + "type": "string", + "description": "The resource name for this CryptoKeyVersion.\n" + }, + "state": { + "type": "string", + "description": "The current state of the CryptoKeyVersion.\n" + } + }, + "type": "object", + "required": [ + "name", + "state" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:kms/getCryptoKeysKeyVersionTemplate:getCryptoKeysKeyVersionTemplate": { + "properties": { + "algorithm": { + "type": "string", + "description": "The algorithm to use when creating a version based on this template.\nSee the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm) for possible inputs.\n" + }, + "protectionLevel": { + "type": "string", + "description": "The protection level to use when creating a version based on this template. Possible values include \"SOFTWARE\", \"HSM\", \"EXTERNAL\", \"EXTERNAL_VPC\". Defaults to \"SOFTWARE\".\n" + } + }, + "type": "object", + "required": [ + "algorithm", + "protectionLevel" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:kms/getKMSCryptoKeyKeyAccessJustificationsPolicy:getKMSCryptoKeyKeyAccessJustificationsPolicy": { + "properties": { + "allowedAccessReasons": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of allowed reasons for access to this CryptoKey. 
Zero allowed\naccess reasons means all encrypt, decrypt, and sign operations for\nthis CryptoKey will fail.\n" + } + }, + "type": "object", + "required": [ + "allowedAccessReasons" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:kms/getKMSCryptoKeyPrimary:getKMSCryptoKeyPrimary": { + "properties": { + "name": { + "type": "string", + "description": "The CryptoKey's name.\nA CryptoKey’s name belonging to the specified Google Cloud Platform KeyRing and match the regular expression `[a-zA-Z0-9_-]{1,63}`\n" + }, + "state": { + "type": "string", + "description": "The current state of the CryptoKeyVersion.\n" + } + }, + "type": "object", + "required": [ + "name", + "state" + ], + "language": { + "nodejs": { + "requiredInputs": [] + } + } + }, + "gcp:kms/getKMSCryptoKeyVersionPublicKey:getKMSCryptoKeyVersionPublicKey": { "properties": { "algorithm": { "type": "string", @@ -92998,6 +93997,14 @@ "description": "IP ranges encompassing the subnets to be excluded from peering.\n", "willReplaceOnChanges": true }, + "includeExportRanges": { + "type": "array", + "items": { + "type": "string" + }, + "description": "IP ranges allowed to be included from peering.\n", + "willReplaceOnChanges": true + }, "uri": { "type": "string", "description": "The URI of the VPC network resource.\n", @@ -99502,6 +100509,10 @@ "type": "string", "description": "The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes.\nMay not exceed the subscription's acknowledgement deadline.\nA duration in seconds with up to nine fractional digits, ending with 's'. Example: \"3.5s\".\n" }, + "maxMessages": { + "type": "integer", + "description": "The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages.\n" + }, "serviceAccountEmail": { "type": "string", "description": "The service account to use to write to Cloud Storage. If not specified, the Pub/Sub\n[service agent](https://cloud.google.com/iam/docs/service-agents),\nservice-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used.\n" @@ -99526,6 +100537,10 @@ }, "gcp:pubsub/SubscriptionCloudStorageConfigAvroConfig:SubscriptionCloudStorageConfigAvroConfig": { "properties": { + "useTopicSchema": { + "type": "boolean", + "description": "When true, the output Cloud Storage file will be serialized using the topic schema, if it exists.\n" + }, "writeMetadata": { "type": "boolean", "description": "When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output.\n" @@ -99860,6 +100875,10 @@ "type": "string", "description": "The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes.\nMay not exceed the subscription's acknowledgement deadline.\nA duration in seconds with up to nine fractional digits, ending with 's'. Example: \"3.5s\".\n" }, + "maxMessages": { + "type": "integer", + "description": "The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages.\n" + }, "serviceAccountEmail": { "type": "string", "description": "The service account to use to write to Cloud Storage. 
If not specified, the Pub/Sub\n[service agent](https://cloud.google.com/iam/docs/service-agents),\nservice-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used.\n" @@ -99878,6 +100897,7 @@ "filenameSuffix", "maxBytes", "maxDuration", + "maxMessages", "serviceAccountEmail", "state" ], @@ -99889,6 +100909,10 @@ }, "gcp:pubsub/getSubscriptionCloudStorageConfigAvroConfig:getSubscriptionCloudStorageConfigAvroConfig": { "properties": { + "useTopicSchema": { + "type": "boolean", + "description": "When true, the output Cloud Storage file will be serialized using the topic schema, if it exists.\n" + }, "writeMetadata": { "type": "boolean", "description": "When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output.\n" @@ -99896,6 +100920,7 @@ }, "type": "object", "required": [ + "useTopicSchema", "writeMetadata" ], "language": { @@ -100278,6 +101303,111 @@ }, "type": "object" }, + "gcp:redis/ClusterMaintenancePolicy:ClusterMaintenancePolicy": { + "properties": { + "createTime": { + "type": "string", + "description": "(Output)\nOutput only. The time when the policy was created.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond\nresolution and up to nine fractional digits.\n" + }, + "updateTime": { + "type": "string", + "description": "(Output)\nOutput only. The time when the policy was last updated.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond\nresolution and up to nine fractional digits.\n" + }, + "weeklyMaintenanceWindows": { + "type": "array", + "items": { + "$ref": "#/types/gcp:redis/ClusterMaintenancePolicyWeeklyMaintenanceWindow:ClusterMaintenancePolicyWeeklyMaintenanceWindow" + }, + "description": "Optional. Maintenance window that is applied to resources covered by this policy.\nMinimum 1. For the current version, the maximum number\nof weekly_window is expected to be one.\nStructure is documented below.\n" + } + }, + "type": "object", + "language": { + "nodejs": { + "requiredOutputs": [ + "createTime", + "updateTime" + ] + } + } + }, + "gcp:redis/ClusterMaintenancePolicyWeeklyMaintenanceWindow:ClusterMaintenancePolicyWeeklyMaintenanceWindow": { + "properties": { + "day": { + "type": "string", + "description": "Required. The day of week that maintenance updates occur.\n- DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified.\n- MONDAY: Monday\n- TUESDAY: Tuesday\n- WEDNESDAY: Wednesday\n- THURSDAY: Thursday\n- FRIDAY: Friday\n- SATURDAY: Saturday\n- SUNDAY: Sunday\nPossible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`.\n" + }, + "duration": { + "type": "string", + "description": "(Output)\nOutput only. Duration of the maintenance window.\nThe current window is fixed at 1 hour.\nA duration in seconds with up to nine fractional digits,\nterminated by 's'. Example: \"3.5s\".\n" + }, + "startTime": { + "$ref": "#/types/gcp:redis/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime:ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime", + "description": "Required. 
Start time of the window in UTC time.\nStructure is documented below.\n" + } + }, + "type": "object", + "required": [ + "day", + "startTime" + ], + "language": { + "nodejs": { + "requiredOutputs": [ + "day", + "duration", + "startTime" + ] + } + } + }, + "gcp:redis/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime:ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime": { + "properties": { + "hours": { + "type": "integer", + "description": "Hours of day in 24 hour format. Should be from 0 to 23.\nAn API may choose to allow the value \"24:00:00\" for scenarios like business closing time.\n" + }, + "minutes": { + "type": "integer", + "description": "Minutes of hour of day. Must be from 0 to 59.\n" + }, + "nanos": { + "type": "integer", + "description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.\n" + }, + "seconds": { + "type": "integer", + "description": "Seconds of minutes of the time. Must normally be from 0 to 59.\nAn API may allow the value 60 if it allows leap-seconds.\n" + } + }, + "type": "object" + }, + "gcp:redis/ClusterMaintenanceSchedule:ClusterMaintenanceSchedule": { + "properties": { + "endTime": { + "type": "string", + "description": "(Output)\nOutput only. The end time of any upcoming scheduled maintenance for this cluster.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond\nresolution and up to nine fractional digits.\n" + }, + "scheduleDeadlineTime": { + "type": "string", + "description": "(Output)\nOutput only. The deadline that the maintenance schedule start time\ncan not go beyond, including reschedule.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond\nresolution and up to nine fractional digits.\n" + }, + "startTime": { + "type": "string", + "description": "(Output)\nOutput only. The start time of any upcoming scheduled maintenance for this cluster.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond\nresolution and up to nine fractional digits.\n" + } + }, + "type": "object", + "language": { + "nodejs": { + "requiredOutputs": [ + "endTime", + "scheduleDeadlineTime", + "startTime" + ] + } + } + }, "gcp:redis/ClusterPscConfig:ClusterPscConfig": { "properties": { "network": { @@ -114636,7 +115766,7 @@ }, "deletionPolicy": { "type": "string", - "description": "Policy to determine if the cluster should be deleted forcefully.\nDeleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster.\nDeleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = \"FORCE\" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.\n" + "description": "Policy to determine if the cluster should be deleted forcefully.\nDeleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster.\nDeleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = \"FORCE\" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.\nPossible values: DEFAULT, FORCE\n" }, "displayName": { "type": "string", @@ -114741,6 +115871,17 @@ "type": "string", "description": "Output only. 
The current serving state of the cluster.\n" }, + "subscriptionType": { + "type": "string", + "description": "The subscrition type of cluster.\nPossible values are: `TRIAL`, `STANDARD`.\n" + }, + "trialMetadatas": { + "type": "array", + "items": { + "$ref": "#/types/gcp:alloydb/ClusterTrialMetadata:ClusterTrialMetadata" + }, + "description": "Contains information and all metadata related to TRIAL clusters.\nStructure is documented below.\n" + }, "uid": { "type": "string", "description": "The system-generated UID of the resource.\n" @@ -114763,7 +115904,9 @@ "project", "reconciling", "state", + "subscriptionType", "pulumiLabels", + "trialMetadatas", "uid" ], "inputProperties": { @@ -114797,7 +115940,7 @@ }, "deletionPolicy": { "type": "string", - "description": "Policy to determine if the cluster should be deleted forcefully.\nDeleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster.\nDeleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = \"FORCE\" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.\n" + "description": "Policy to determine if the cluster should be deleted forcefully.\nDeleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster.\nDeleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = \"FORCE\" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.\nPossible values: DEFAULT, FORCE\n" }, "displayName": { "type": "string", @@ -114857,6 +116000,10 @@ "secondaryConfig": { "$ref": "#/types/gcp:alloydb/ClusterSecondaryConfig:ClusterSecondaryConfig", "description": "Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY.\nStructure is documented below.\n" + }, + "subscriptionType": { + "type": "string", + "description": "The subscrition type of cluster.\nPossible values are: `TRIAL`, `STANDARD`.\n" } }, "requiredInputs": [ @@ -114910,7 +116057,7 @@ }, "deletionPolicy": { "type": "string", - "description": "Policy to determine if the cluster should be deleted forcefully.\nDeleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster.\nDeleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = \"FORCE\" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.\n" + "description": "Policy to determine if the cluster should be deleted forcefully.\nDeleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster.\nDeleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = \"FORCE\" otherwise an error is returned. 
This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance.\nPossible values: DEFAULT, FORCE\n" }, "displayName": { "type": "string", @@ -115019,6 +116166,17 @@ "type": "string", "description": "Output only. The current serving state of the cluster.\n" }, + "subscriptionType": { + "type": "string", + "description": "The subscrition type of cluster.\nPossible values are: `TRIAL`, `STANDARD`.\n" + }, + "trialMetadatas": { + "type": "array", + "items": { + "$ref": "#/types/gcp:alloydb/ClusterTrialMetadata:ClusterTrialMetadata" + }, + "description": "Contains information and all metadata related to TRIAL clusters.\nStructure is documented below.\n" + }, "uid": { "type": "string", "description": "The system-generated UID of the resource.\n" @@ -122451,7 +123609,7 @@ } }, "gcp:assuredworkloads/workload:Workload": { - "description": "The AssuredWorkloads Workload resource\n\n## Example Usage\n\n### Basic_workload\nA basic test of a assuredworkloads api\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst primary = new gcp.assuredworkloads.Workload(\"primary\", {\n complianceRegime: \"FEDRAMP_MODERATE\",\n displayName: \"{{display}}\",\n location: \"us-west1\",\n organization: \"123456789\",\n billingAccount: \"billingAccounts/000000-0000000-0000000-000000\",\n kmsSettings: {\n nextRotationTime: \"9999-10-02T15:01:23Z\",\n rotationPeriod: \"10368000s\",\n },\n provisionedResourcesParent: \"folders/519620126891\",\n resourceSettings: [\n {\n displayName: \"folder-display-name\",\n resourceType: \"CONSUMER_FOLDER\",\n },\n {\n resourceType: \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n resourceId: \"ring\",\n resourceType: \"KEYRING\",\n },\n ],\n violationNotificationsEnabled: true,\n labels: {\n \"label-one\": \"value-one\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nprimary = gcp.assuredworkloads.Workload(\"primary\",\n compliance_regime=\"FEDRAMP_MODERATE\",\n display_name=\"{{display}}\",\n location=\"us-west1\",\n organization=\"123456789\",\n billing_account=\"billingAccounts/000000-0000000-0000000-000000\",\n kms_settings={\n \"next_rotation_time\": \"9999-10-02T15:01:23Z\",\n \"rotation_period\": \"10368000s\",\n },\n provisioned_resources_parent=\"folders/519620126891\",\n resource_settings=[\n {\n \"display_name\": \"folder-display-name\",\n \"resource_type\": \"CONSUMER_FOLDER\",\n },\n {\n \"resource_type\": \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n \"resource_id\": \"ring\",\n \"resource_type\": \"KEYRING\",\n },\n ],\n violation_notifications_enabled=True,\n labels={\n \"label-one\": \"value-one\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var primary = new Gcp.AssuredWorkloads.Workload(\"primary\", new()\n {\n ComplianceRegime = \"FEDRAMP_MODERATE\",\n DisplayName = \"{{display}}\",\n Location = \"us-west1\",\n Organization = \"123456789\",\n BillingAccount = \"billingAccounts/000000-0000000-0000000-000000\",\n KmsSettings = new Gcp.AssuredWorkloads.Inputs.WorkloadKmsSettingsArgs\n {\n NextRotationTime = \"9999-10-02T15:01:23Z\",\n RotationPeriod = \"10368000s\",\n },\n ProvisionedResourcesParent = \"folders/519620126891\",\n ResourceSettings = new[]\n {\n new 
Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n DisplayName = \"folder-display-name\",\n ResourceType = \"CONSUMER_FOLDER\",\n },\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceType = \"ENCRYPTION_KEYS_PROJECT\",\n },\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceId = \"ring\",\n ResourceType = \"KEYRING\",\n },\n },\n ViolationNotificationsEnabled = true,\n Labels = \n {\n { \"label-one\", \"value-one\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/assuredworkloads\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := assuredworkloads.NewWorkload(ctx, \"primary\", \u0026assuredworkloads.WorkloadArgs{\n\t\t\tComplianceRegime: pulumi.String(\"FEDRAMP_MODERATE\"),\n\t\t\tDisplayName: pulumi.String(\"{{display}}\"),\n\t\t\tLocation: pulumi.String(\"us-west1\"),\n\t\t\tOrganization: pulumi.String(\"123456789\"),\n\t\t\tBillingAccount: pulumi.String(\"billingAccounts/000000-0000000-0000000-000000\"),\n\t\t\tKmsSettings: \u0026assuredworkloads.WorkloadKmsSettingsArgs{\n\t\t\t\tNextRotationTime: pulumi.String(\"9999-10-02T15:01:23Z\"),\n\t\t\t\tRotationPeriod: pulumi.String(\"10368000s\"),\n\t\t\t},\n\t\t\tProvisionedResourcesParent: pulumi.String(\"folders/519620126891\"),\n\t\t\tResourceSettings: assuredworkloads.WorkloadResourceSettingArray{\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tDisplayName: pulumi.String(\"folder-display-name\"),\n\t\t\t\t\tResourceType: pulumi.String(\"CONSUMER_FOLDER\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceType: pulumi.String(\"ENCRYPTION_KEYS_PROJECT\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceId: pulumi.String(\"ring\"),\n\t\t\t\t\tResourceType: pulumi.String(\"KEYRING\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tViolationNotificationsEnabled: pulumi.Bool(true),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-one\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.assuredworkloads.Workload;\nimport com.pulumi.gcp.assuredworkloads.WorkloadArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadKmsSettingsArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadResourceSettingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var primary = new Workload(\"primary\", WorkloadArgs.builder()\n .complianceRegime(\"FEDRAMP_MODERATE\")\n .displayName(\"{{display}}\")\n .location(\"us-west1\")\n .organization(\"123456789\")\n .billingAccount(\"billingAccounts/000000-0000000-0000000-000000\")\n .kmsSettings(WorkloadKmsSettingsArgs.builder()\n .nextRotationTime(\"9999-10-02T15:01:23Z\")\n .rotationPeriod(\"10368000s\")\n .build())\n .provisionedResourcesParent(\"folders/519620126891\")\n .resourceSettings( \n WorkloadResourceSettingArgs.builder()\n .displayName(\"folder-display-name\")\n .resourceType(\"CONSUMER_FOLDER\")\n .build(),\n 
WorkloadResourceSettingArgs.builder()\n .resourceType(\"ENCRYPTION_KEYS_PROJECT\")\n .build(),\n WorkloadResourceSettingArgs.builder()\n .resourceId(\"ring\")\n .resourceType(\"KEYRING\")\n .build())\n .violationNotificationsEnabled(true)\n .labels(Map.of(\"label-one\", \"value-one\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n primary:\n type: gcp:assuredworkloads:Workload\n properties:\n complianceRegime: FEDRAMP_MODERATE\n displayName: '{{display}}'\n location: us-west1\n organization: '123456789'\n billingAccount: billingAccounts/000000-0000000-0000000-000000\n kmsSettings:\n nextRotationTime: 9999-10-02T15:01:23Z\n rotationPeriod: 10368000s\n provisionedResourcesParent: folders/519620126891\n resourceSettings:\n - displayName: folder-display-name\n resourceType: CONSUMER_FOLDER\n - resourceType: ENCRYPTION_KEYS_PROJECT\n - resourceId: ring\n resourceType: KEYRING\n violationNotificationsEnabled: true\n labels:\n label-one: value-one\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Sovereign_controls_workload\nA Sovereign Controls test of the assuredworkloads api\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst primary = new gcp.assuredworkloads.Workload(\"primary\", {\n complianceRegime: \"EU_REGIONS_AND_SUPPORT\",\n displayName: \"display\",\n location: \"europe-west9\",\n organization: \"123456789\",\n billingAccount: \"billingAccounts/000000-0000000-0000000-000000\",\n enableSovereignControls: true,\n kmsSettings: {\n nextRotationTime: \"9999-10-02T15:01:23Z\",\n rotationPeriod: \"10368000s\",\n },\n resourceSettings: [\n {\n resourceType: \"CONSUMER_FOLDER\",\n },\n {\n resourceType: \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n resourceId: \"ring\",\n resourceType: \"KEYRING\",\n },\n ],\n labels: {\n \"label-one\": \"value-one\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nprimary = gcp.assuredworkloads.Workload(\"primary\",\n compliance_regime=\"EU_REGIONS_AND_SUPPORT\",\n display_name=\"display\",\n location=\"europe-west9\",\n organization=\"123456789\",\n billing_account=\"billingAccounts/000000-0000000-0000000-000000\",\n enable_sovereign_controls=True,\n kms_settings={\n \"next_rotation_time\": \"9999-10-02T15:01:23Z\",\n \"rotation_period\": \"10368000s\",\n },\n resource_settings=[\n {\n \"resource_type\": \"CONSUMER_FOLDER\",\n },\n {\n \"resource_type\": \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n \"resource_id\": \"ring\",\n \"resource_type\": \"KEYRING\",\n },\n ],\n labels={\n \"label-one\": \"value-one\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var primary = new Gcp.AssuredWorkloads.Workload(\"primary\", new()\n {\n ComplianceRegime = \"EU_REGIONS_AND_SUPPORT\",\n DisplayName = \"display\",\n Location = \"europe-west9\",\n Organization = \"123456789\",\n BillingAccount = \"billingAccounts/000000-0000000-0000000-000000\",\n EnableSovereignControls = true,\n KmsSettings = new Gcp.AssuredWorkloads.Inputs.WorkloadKmsSettingsArgs\n {\n NextRotationTime = \"9999-10-02T15:01:23Z\",\n RotationPeriod = \"10368000s\",\n },\n ResourceSettings = new[]\n {\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceType = \"CONSUMER_FOLDER\",\n },\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceType = \"ENCRYPTION_KEYS_PROJECT\",\n },\n new 
Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceId = \"ring\",\n ResourceType = \"KEYRING\",\n },\n },\n Labels = \n {\n { \"label-one\", \"value-one\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/assuredworkloads\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := assuredworkloads.NewWorkload(ctx, \"primary\", \u0026assuredworkloads.WorkloadArgs{\n\t\t\tComplianceRegime: pulumi.String(\"EU_REGIONS_AND_SUPPORT\"),\n\t\t\tDisplayName: pulumi.String(\"display\"),\n\t\t\tLocation: pulumi.String(\"europe-west9\"),\n\t\t\tOrganization: pulumi.String(\"123456789\"),\n\t\t\tBillingAccount: pulumi.String(\"billingAccounts/000000-0000000-0000000-000000\"),\n\t\t\tEnableSovereignControls: pulumi.Bool(true),\n\t\t\tKmsSettings: \u0026assuredworkloads.WorkloadKmsSettingsArgs{\n\t\t\t\tNextRotationTime: pulumi.String(\"9999-10-02T15:01:23Z\"),\n\t\t\t\tRotationPeriod: pulumi.String(\"10368000s\"),\n\t\t\t},\n\t\t\tResourceSettings: assuredworkloads.WorkloadResourceSettingArray{\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceType: pulumi.String(\"CONSUMER_FOLDER\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceType: pulumi.String(\"ENCRYPTION_KEYS_PROJECT\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceId: pulumi.String(\"ring\"),\n\t\t\t\t\tResourceType: pulumi.String(\"KEYRING\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-one\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.assuredworkloads.Workload;\nimport com.pulumi.gcp.assuredworkloads.WorkloadArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadKmsSettingsArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadResourceSettingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var primary = new Workload(\"primary\", WorkloadArgs.builder()\n .complianceRegime(\"EU_REGIONS_AND_SUPPORT\")\n .displayName(\"display\")\n .location(\"europe-west9\")\n .organization(\"123456789\")\n .billingAccount(\"billingAccounts/000000-0000000-0000000-000000\")\n .enableSovereignControls(true)\n .kmsSettings(WorkloadKmsSettingsArgs.builder()\n .nextRotationTime(\"9999-10-02T15:01:23Z\")\n .rotationPeriod(\"10368000s\")\n .build())\n .resourceSettings( \n WorkloadResourceSettingArgs.builder()\n .resourceType(\"CONSUMER_FOLDER\")\n .build(),\n WorkloadResourceSettingArgs.builder()\n .resourceType(\"ENCRYPTION_KEYS_PROJECT\")\n .build(),\n WorkloadResourceSettingArgs.builder()\n .resourceId(\"ring\")\n .resourceType(\"KEYRING\")\n .build())\n .labels(Map.of(\"label-one\", \"value-one\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n primary:\n type: gcp:assuredworkloads:Workload\n properties:\n complianceRegime: EU_REGIONS_AND_SUPPORT\n displayName: display\n location: europe-west9\n organization: '123456789'\n billingAccount: 
billingAccounts/000000-0000000-0000000-000000\n enableSovereignControls: true\n kmsSettings:\n nextRotationTime: 9999-10-02T15:01:23Z\n rotationPeriod: 10368000s\n resourceSettings:\n - resourceType: CONSUMER_FOLDER\n - resourceType: ENCRYPTION_KEYS_PROJECT\n - resourceId: ring\n resourceType: KEYRING\n labels:\n label-one: value-one\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nWorkload can be imported using any of these accepted formats:\n\n* `organizations/{{organization}}/locations/{{location}}/workloads/{{name}}`\n\n* `{{organization}}/{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, Workload can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:assuredworkloads/workload:Workload default organizations/{{organization}}/locations/{{location}}/workloads/{{name}}\n```\n\n```sh\n$ pulumi import gcp:assuredworkloads/workload:Workload default {{organization}}/{{location}}/{{name}}\n```\n\n", + "description": "The AssuredWorkloads Workload resource\n\n## Example Usage\n\n### Basic_workload\nA basic test of a assuredworkloads api\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst primary = new gcp.assuredworkloads.Workload(\"primary\", {\n complianceRegime: \"FEDRAMP_MODERATE\",\n displayName: \"{{display}}\",\n location: \"us-west1\",\n organization: \"123456789\",\n billingAccount: \"billingAccounts/000000-0000000-0000000-000000\",\n kmsSettings: {\n nextRotationTime: \"9999-10-02T15:01:23Z\",\n rotationPeriod: \"10368000s\",\n },\n provisionedResourcesParent: \"folders/519620126891\",\n resourceSettings: [\n {\n displayName: \"{{name}}\",\n resourceType: \"CONSUMER_FOLDER\",\n },\n {\n resourceType: \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n resourceId: \"ring\",\n resourceType: \"KEYRING\",\n },\n ],\n violationNotificationsEnabled: true,\n labels: {\n \"label-one\": \"value-one\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nprimary = gcp.assuredworkloads.Workload(\"primary\",\n compliance_regime=\"FEDRAMP_MODERATE\",\n display_name=\"{{display}}\",\n location=\"us-west1\",\n organization=\"123456789\",\n billing_account=\"billingAccounts/000000-0000000-0000000-000000\",\n kms_settings={\n \"next_rotation_time\": \"9999-10-02T15:01:23Z\",\n \"rotation_period\": \"10368000s\",\n },\n provisioned_resources_parent=\"folders/519620126891\",\n resource_settings=[\n {\n \"display_name\": \"{{name}}\",\n \"resource_type\": \"CONSUMER_FOLDER\",\n },\n {\n \"resource_type\": \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n \"resource_id\": \"ring\",\n \"resource_type\": \"KEYRING\",\n },\n ],\n violation_notifications_enabled=True,\n labels={\n \"label-one\": \"value-one\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var primary = new Gcp.AssuredWorkloads.Workload(\"primary\", new()\n {\n ComplianceRegime = \"FEDRAMP_MODERATE\",\n DisplayName = \"{{display}}\",\n Location = \"us-west1\",\n Organization = \"123456789\",\n BillingAccount = \"billingAccounts/000000-0000000-0000000-000000\",\n KmsSettings = new Gcp.AssuredWorkloads.Inputs.WorkloadKmsSettingsArgs\n {\n NextRotationTime = \"9999-10-02T15:01:23Z\",\n RotationPeriod = \"10368000s\",\n },\n ProvisionedResourcesParent = \"folders/519620126891\",\n ResourceSettings = new[]\n {\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n 
{\n DisplayName = \"{{name}}\",\n ResourceType = \"CONSUMER_FOLDER\",\n },\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceType = \"ENCRYPTION_KEYS_PROJECT\",\n },\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceId = \"ring\",\n ResourceType = \"KEYRING\",\n },\n },\n ViolationNotificationsEnabled = true,\n Labels = \n {\n { \"label-one\", \"value-one\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/assuredworkloads\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := assuredworkloads.NewWorkload(ctx, \"primary\", \u0026assuredworkloads.WorkloadArgs{\n\t\t\tComplianceRegime: pulumi.String(\"FEDRAMP_MODERATE\"),\n\t\t\tDisplayName: pulumi.String(\"{{display}}\"),\n\t\t\tLocation: pulumi.String(\"us-west1\"),\n\t\t\tOrganization: pulumi.String(\"123456789\"),\n\t\t\tBillingAccount: pulumi.String(\"billingAccounts/000000-0000000-0000000-000000\"),\n\t\t\tKmsSettings: \u0026assuredworkloads.WorkloadKmsSettingsArgs{\n\t\t\t\tNextRotationTime: pulumi.String(\"9999-10-02T15:01:23Z\"),\n\t\t\t\tRotationPeriod: pulumi.String(\"10368000s\"),\n\t\t\t},\n\t\t\tProvisionedResourcesParent: pulumi.String(\"folders/519620126891\"),\n\t\t\tResourceSettings: assuredworkloads.WorkloadResourceSettingArray{\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tDisplayName: pulumi.String(\"{{name}}\"),\n\t\t\t\t\tResourceType: pulumi.String(\"CONSUMER_FOLDER\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceType: pulumi.String(\"ENCRYPTION_KEYS_PROJECT\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceId: pulumi.String(\"ring\"),\n\t\t\t\t\tResourceType: pulumi.String(\"KEYRING\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tViolationNotificationsEnabled: pulumi.Bool(true),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-one\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.assuredworkloads.Workload;\nimport com.pulumi.gcp.assuredworkloads.WorkloadArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadKmsSettingsArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadResourceSettingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var primary = new Workload(\"primary\", WorkloadArgs.builder()\n .complianceRegime(\"FEDRAMP_MODERATE\")\n .displayName(\"{{display}}\")\n .location(\"us-west1\")\n .organization(\"123456789\")\n .billingAccount(\"billingAccounts/000000-0000000-0000000-000000\")\n .kmsSettings(WorkloadKmsSettingsArgs.builder()\n .nextRotationTime(\"9999-10-02T15:01:23Z\")\n .rotationPeriod(\"10368000s\")\n .build())\n .provisionedResourcesParent(\"folders/519620126891\")\n .resourceSettings( \n WorkloadResourceSettingArgs.builder()\n .displayName(\"{{name}}\")\n .resourceType(\"CONSUMER_FOLDER\")\n .build(),\n WorkloadResourceSettingArgs.builder()\n .resourceType(\"ENCRYPTION_KEYS_PROJECT\")\n .build(),\n 
WorkloadResourceSettingArgs.builder()\n .resourceId(\"ring\")\n .resourceType(\"KEYRING\")\n .build())\n .violationNotificationsEnabled(true)\n .labels(Map.of(\"label-one\", \"value-one\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n primary:\n type: gcp:assuredworkloads:Workload\n properties:\n complianceRegime: FEDRAMP_MODERATE\n displayName: '{{display}}'\n location: us-west1\n organization: '123456789'\n billingAccount: billingAccounts/000000-0000000-0000000-000000\n kmsSettings:\n nextRotationTime: 9999-10-02T15:01:23Z\n rotationPeriod: 10368000s\n provisionedResourcesParent: folders/519620126891\n resourceSettings:\n - displayName: '{{name}}'\n resourceType: CONSUMER_FOLDER\n - resourceType: ENCRYPTION_KEYS_PROJECT\n - resourceId: ring\n resourceType: KEYRING\n violationNotificationsEnabled: true\n labels:\n label-one: value-one\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Sovereign_controls_workload\nA Sovereign Controls test of the assuredworkloads api\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst primary = new gcp.assuredworkloads.Workload(\"primary\", {\n complianceRegime: \"EU_REGIONS_AND_SUPPORT\",\n displayName: \"display\",\n location: \"europe-west9\",\n organization: \"123456789\",\n billingAccount: \"billingAccounts/000000-0000000-0000000-000000\",\n enableSovereignControls: true,\n kmsSettings: {\n nextRotationTime: \"9999-10-02T15:01:23Z\",\n rotationPeriod: \"10368000s\",\n },\n resourceSettings: [\n {\n resourceType: \"CONSUMER_FOLDER\",\n },\n {\n resourceType: \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n resourceId: \"ring\",\n resourceType: \"KEYRING\",\n },\n ],\n labels: {\n \"label-one\": \"value-one\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nprimary = gcp.assuredworkloads.Workload(\"primary\",\n compliance_regime=\"EU_REGIONS_AND_SUPPORT\",\n display_name=\"display\",\n location=\"europe-west9\",\n organization=\"123456789\",\n billing_account=\"billingAccounts/000000-0000000-0000000-000000\",\n enable_sovereign_controls=True,\n kms_settings={\n \"next_rotation_time\": \"9999-10-02T15:01:23Z\",\n \"rotation_period\": \"10368000s\",\n },\n resource_settings=[\n {\n \"resource_type\": \"CONSUMER_FOLDER\",\n },\n {\n \"resource_type\": \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n \"resource_id\": \"ring\",\n \"resource_type\": \"KEYRING\",\n },\n ],\n labels={\n \"label-one\": \"value-one\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var primary = new Gcp.AssuredWorkloads.Workload(\"primary\", new()\n {\n ComplianceRegime = \"EU_REGIONS_AND_SUPPORT\",\n DisplayName = \"display\",\n Location = \"europe-west9\",\n Organization = \"123456789\",\n BillingAccount = \"billingAccounts/000000-0000000-0000000-000000\",\n EnableSovereignControls = true,\n KmsSettings = new Gcp.AssuredWorkloads.Inputs.WorkloadKmsSettingsArgs\n {\n NextRotationTime = \"9999-10-02T15:01:23Z\",\n RotationPeriod = \"10368000s\",\n },\n ResourceSettings = new[]\n {\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceType = \"CONSUMER_FOLDER\",\n },\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceType = \"ENCRYPTION_KEYS_PROJECT\",\n },\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceId = \"ring\",\n ResourceType = \"KEYRING\",\n },\n },\n Labels = \n {\n { 
\"label-one\", \"value-one\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/assuredworkloads\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := assuredworkloads.NewWorkload(ctx, \"primary\", \u0026assuredworkloads.WorkloadArgs{\n\t\t\tComplianceRegime: pulumi.String(\"EU_REGIONS_AND_SUPPORT\"),\n\t\t\tDisplayName: pulumi.String(\"display\"),\n\t\t\tLocation: pulumi.String(\"europe-west9\"),\n\t\t\tOrganization: pulumi.String(\"123456789\"),\n\t\t\tBillingAccount: pulumi.String(\"billingAccounts/000000-0000000-0000000-000000\"),\n\t\t\tEnableSovereignControls: pulumi.Bool(true),\n\t\t\tKmsSettings: \u0026assuredworkloads.WorkloadKmsSettingsArgs{\n\t\t\t\tNextRotationTime: pulumi.String(\"9999-10-02T15:01:23Z\"),\n\t\t\t\tRotationPeriod: pulumi.String(\"10368000s\"),\n\t\t\t},\n\t\t\tResourceSettings: assuredworkloads.WorkloadResourceSettingArray{\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceType: pulumi.String(\"CONSUMER_FOLDER\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceType: pulumi.String(\"ENCRYPTION_KEYS_PROJECT\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceId: pulumi.String(\"ring\"),\n\t\t\t\t\tResourceType: pulumi.String(\"KEYRING\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-one\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.assuredworkloads.Workload;\nimport com.pulumi.gcp.assuredworkloads.WorkloadArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadKmsSettingsArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadResourceSettingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var primary = new Workload(\"primary\", WorkloadArgs.builder()\n .complianceRegime(\"EU_REGIONS_AND_SUPPORT\")\n .displayName(\"display\")\n .location(\"europe-west9\")\n .organization(\"123456789\")\n .billingAccount(\"billingAccounts/000000-0000000-0000000-000000\")\n .enableSovereignControls(true)\n .kmsSettings(WorkloadKmsSettingsArgs.builder()\n .nextRotationTime(\"9999-10-02T15:01:23Z\")\n .rotationPeriod(\"10368000s\")\n .build())\n .resourceSettings( \n WorkloadResourceSettingArgs.builder()\n .resourceType(\"CONSUMER_FOLDER\")\n .build(),\n WorkloadResourceSettingArgs.builder()\n .resourceType(\"ENCRYPTION_KEYS_PROJECT\")\n .build(),\n WorkloadResourceSettingArgs.builder()\n .resourceId(\"ring\")\n .resourceType(\"KEYRING\")\n .build())\n .labels(Map.of(\"label-one\", \"value-one\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n primary:\n type: gcp:assuredworkloads:Workload\n properties:\n complianceRegime: EU_REGIONS_AND_SUPPORT\n displayName: display\n location: europe-west9\n organization: '123456789'\n billingAccount: billingAccounts/000000-0000000-0000000-000000\n enableSovereignControls: true\n kmsSettings:\n nextRotationTime: 9999-10-02T15:01:23Z\n rotationPeriod: 10368000s\n resourceSettings:\n - 
resourceType: CONSUMER_FOLDER\n - resourceType: ENCRYPTION_KEYS_PROJECT\n - resourceId: ring\n resourceType: KEYRING\n labels:\n label-one: value-one\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Split_billing_partner_workload\nA Split billing partner test of the assuredworkloads api\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst primary = new gcp.assuredworkloads.Workload(\"primary\", {\n complianceRegime: \"ASSURED_WORKLOADS_FOR_PARTNERS\",\n displayName: \"display\",\n location: \"europe-west8\",\n organization: \"123456789\",\n billingAccount: \"billingAccounts/000000-0000000-0000000-000000\",\n partner: \"SOVEREIGN_CONTROLS_BY_PSN\",\n partnerPermissions: {\n assuredWorkloadsMonitoring: true,\n dataLogsViewer: true,\n serviceAccessApprover: true,\n },\n partnerServicesBillingAccount: \"billingAccounts/01BF3F-2C6DE5-30C607\",\n resourceSettings: [\n {\n resourceType: \"CONSUMER_FOLDER\",\n },\n {\n resourceType: \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n resourceId: \"ring\",\n resourceType: \"KEYRING\",\n },\n ],\n violationNotificationsEnabled: true,\n labels: {\n \"label-one\": \"value-one\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nprimary = gcp.assuredworkloads.Workload(\"primary\",\n compliance_regime=\"ASSURED_WORKLOADS_FOR_PARTNERS\",\n display_name=\"display\",\n location=\"europe-west8\",\n organization=\"123456789\",\n billing_account=\"billingAccounts/000000-0000000-0000000-000000\",\n partner=\"SOVEREIGN_CONTROLS_BY_PSN\",\n partner_permissions={\n \"assured_workloads_monitoring\": True,\n \"data_logs_viewer\": True,\n \"service_access_approver\": True,\n },\n partner_services_billing_account=\"billingAccounts/01BF3F-2C6DE5-30C607\",\n resource_settings=[\n {\n \"resource_type\": \"CONSUMER_FOLDER\",\n },\n {\n \"resource_type\": \"ENCRYPTION_KEYS_PROJECT\",\n },\n {\n \"resource_id\": \"ring\",\n \"resource_type\": \"KEYRING\",\n },\n ],\n violation_notifications_enabled=True,\n labels={\n \"label-one\": \"value-one\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var primary = new Gcp.AssuredWorkloads.Workload(\"primary\", new()\n {\n ComplianceRegime = \"ASSURED_WORKLOADS_FOR_PARTNERS\",\n DisplayName = \"display\",\n Location = \"europe-west8\",\n Organization = \"123456789\",\n BillingAccount = \"billingAccounts/000000-0000000-0000000-000000\",\n Partner = \"SOVEREIGN_CONTROLS_BY_PSN\",\n PartnerPermissions = new Gcp.AssuredWorkloads.Inputs.WorkloadPartnerPermissionsArgs\n {\n AssuredWorkloadsMonitoring = true,\n DataLogsViewer = true,\n ServiceAccessApprover = true,\n },\n PartnerServicesBillingAccount = \"billingAccounts/01BF3F-2C6DE5-30C607\",\n ResourceSettings = new[]\n {\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceType = \"CONSUMER_FOLDER\",\n },\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceType = \"ENCRYPTION_KEYS_PROJECT\",\n },\n new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs\n {\n ResourceId = \"ring\",\n ResourceType = \"KEYRING\",\n },\n },\n ViolationNotificationsEnabled = true,\n Labels = \n {\n { \"label-one\", \"value-one\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/assuredworkloads\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() 
{\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := assuredworkloads.NewWorkload(ctx, \"primary\", \u0026assuredworkloads.WorkloadArgs{\n\t\t\tComplianceRegime: pulumi.String(\"ASSURED_WORKLOADS_FOR_PARTNERS\"),\n\t\t\tDisplayName: pulumi.String(\"display\"),\n\t\t\tLocation: pulumi.String(\"europe-west8\"),\n\t\t\tOrganization: pulumi.String(\"123456789\"),\n\t\t\tBillingAccount: pulumi.String(\"billingAccounts/000000-0000000-0000000-000000\"),\n\t\t\tPartner: pulumi.String(\"SOVEREIGN_CONTROLS_BY_PSN\"),\n\t\t\tPartnerPermissions: \u0026assuredworkloads.WorkloadPartnerPermissionsArgs{\n\t\t\t\tAssuredWorkloadsMonitoring: pulumi.Bool(true),\n\t\t\t\tDataLogsViewer: pulumi.Bool(true),\n\t\t\t\tServiceAccessApprover: pulumi.Bool(true),\n\t\t\t},\n\t\t\tPartnerServicesBillingAccount: pulumi.String(\"billingAccounts/01BF3F-2C6DE5-30C607\"),\n\t\t\tResourceSettings: assuredworkloads.WorkloadResourceSettingArray{\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceType: pulumi.String(\"CONSUMER_FOLDER\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceType: pulumi.String(\"ENCRYPTION_KEYS_PROJECT\"),\n\t\t\t\t},\n\t\t\t\t\u0026assuredworkloads.WorkloadResourceSettingArgs{\n\t\t\t\t\tResourceId: pulumi.String(\"ring\"),\n\t\t\t\t\tResourceType: pulumi.String(\"KEYRING\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tViolationNotificationsEnabled: pulumi.Bool(true),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-one\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.assuredworkloads.Workload;\nimport com.pulumi.gcp.assuredworkloads.WorkloadArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadPartnerPermissionsArgs;\nimport com.pulumi.gcp.assuredworkloads.inputs.WorkloadResourceSettingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var primary = new Workload(\"primary\", WorkloadArgs.builder()\n .complianceRegime(\"ASSURED_WORKLOADS_FOR_PARTNERS\")\n .displayName(\"display\")\n .location(\"europe-west8\")\n .organization(\"123456789\")\n .billingAccount(\"billingAccounts/000000-0000000-0000000-000000\")\n .partner(\"SOVEREIGN_CONTROLS_BY_PSN\")\n .partnerPermissions(WorkloadPartnerPermissionsArgs.builder()\n .assuredWorkloadsMonitoring(true)\n .dataLogsViewer(true)\n .serviceAccessApprover(true)\n .build())\n .partnerServicesBillingAccount(\"billingAccounts/01BF3F-2C6DE5-30C607\")\n .resourceSettings( \n WorkloadResourceSettingArgs.builder()\n .resourceType(\"CONSUMER_FOLDER\")\n .build(),\n WorkloadResourceSettingArgs.builder()\n .resourceType(\"ENCRYPTION_KEYS_PROJECT\")\n .build(),\n WorkloadResourceSettingArgs.builder()\n .resourceId(\"ring\")\n .resourceType(\"KEYRING\")\n .build())\n .violationNotificationsEnabled(true)\n .labels(Map.of(\"label-one\", \"value-one\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n primary:\n type: gcp:assuredworkloads:Workload\n properties:\n complianceRegime: ASSURED_WORKLOADS_FOR_PARTNERS\n displayName: display\n location: europe-west8\n organization: '123456789'\n billingAccount: 
billingAccounts/000000-0000000-0000000-000000\n partner: SOVEREIGN_CONTROLS_BY_PSN\n partnerPermissions:\n assuredWorkloadsMonitoring: true\n dataLogsViewer: true\n serviceAccessApprover: true\n partnerServicesBillingAccount: billingAccounts/01BF3F-2C6DE5-30C607\n resourceSettings:\n - resourceType: CONSUMER_FOLDER\n - resourceType: ENCRYPTION_KEYS_PROJECT\n - resourceId: ring\n resourceType: KEYRING\n violationNotificationsEnabled: true\n labels:\n label-one: value-one\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nWorkload can be imported using any of these accepted formats:\n\n* `organizations/{{organization}}/locations/{{location}}/workloads/{{name}}`\n\n* `{{organization}}/{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, Workload can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:assuredworkloads/workload:Workload default organizations/{{organization}}/locations/{{location}}/workloads/{{name}}\n```\n\n```sh\n$ pulumi import gcp:assuredworkloads/workload:Workload default {{organization}}/{{location}}/{{name}}\n```\n\n", "properties": { "billingAccount": { "type": "string", @@ -122459,7 +123617,7 @@ }, "complianceRegime": { "type": "string", - "description": "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT\n" + "description": "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS\n" }, "complianceStatuses": { "type": "array", @@ -122531,12 +123689,16 @@ }, "partner": { "type": "string", - "description": "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN\n" + "description": "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM\n" }, "partnerPermissions": { "$ref": "#/types/gcp:assuredworkloads/WorkloadPartnerPermissions:WorkloadPartnerPermissions", "description": "Optional. Permissions granted to the AW Partner SA account for the customer workload\n" }, + "partnerServicesBillingAccount": { + "type": "string", + "description": "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.\n" + }, "provisionedResourcesParent": { "type": "string", "description": "Input only. 
The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id}\n" @@ -122601,7 +123763,7 @@ }, "complianceRegime": { "type": "string", - "description": "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT\n", + "description": "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS\n", "willReplaceOnChanges": true }, "displayName": { @@ -122637,7 +123799,7 @@ }, "partner": { "type": "string", - "description": "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN\n", + "description": "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM\n", "willReplaceOnChanges": true }, "partnerPermissions": { @@ -122645,6 +123807,11 @@ "description": "Optional. Permissions granted to the AW Partner SA account for the customer workload\n", "willReplaceOnChanges": true }, + "partnerServicesBillingAccount": { + "type": "string", + "description": "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.\n", + "willReplaceOnChanges": true + }, "provisionedResourcesParent": { "type": "string", "description": "Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id}\n", @@ -122680,7 +123847,7 @@ }, "complianceRegime": { "type": "string", - "description": "Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT\n", + "description": "Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS\n", "willReplaceOnChanges": true }, "complianceStatuses": { @@ -122757,7 +123924,7 @@ }, "partner": { "type": "string", - "description": "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN\n", + "description": "Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM\n", "willReplaceOnChanges": true }, "partnerPermissions": { @@ -122765,6 +123932,11 @@ "description": "Optional. Permissions granted to the AW Partner SA account for the customer workload\n", "willReplaceOnChanges": true }, + "partnerServicesBillingAccount": { + "type": "string", + "description": "Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC.\n", + "willReplaceOnChanges": true + }, "provisionedResourcesParent": { "type": "string", "description": "Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. 
Format: folders/{folder_id}\n", @@ -122809,6 +123981,323 @@ "type": "object" } }, + "gcp:backupdisasterrecovery/backupVault:BackupVault": { + "description": "## Example Usage\n\n### Backup Dr Backup Vault Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst backup_vault_test = new gcp.backupdisasterrecovery.BackupVault(\"backup-vault-test\", {\n location: \"us-central1\",\n backupVaultId: \"backup-vault-test\",\n description: \"This is a second backup vault built by Terraform.\",\n backupMinimumEnforcedRetentionDuration: \"100000s\",\n labels: {\n foo: \"bar1\",\n bar: \"baz1\",\n },\n annotations: {\n annotations1: \"bar1\",\n annotations2: \"baz1\",\n },\n forceUpdate: true,\n forceDelete: true,\n allowMissing: true,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nbackup_vault_test = gcp.backupdisasterrecovery.BackupVault(\"backup-vault-test\",\n location=\"us-central1\",\n backup_vault_id=\"backup-vault-test\",\n description=\"This is a second backup vault built by Terraform.\",\n backup_minimum_enforced_retention_duration=\"100000s\",\n labels={\n \"foo\": \"bar1\",\n \"bar\": \"baz1\",\n },\n annotations={\n \"annotations1\": \"bar1\",\n \"annotations2\": \"baz1\",\n },\n force_update=True,\n force_delete=True,\n allow_missing=True)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var backup_vault_test = new Gcp.BackupDisasterRecovery.BackupVault(\"backup-vault-test\", new()\n {\n Location = \"us-central1\",\n BackupVaultId = \"backup-vault-test\",\n Description = \"This is a second backup vault built by Terraform.\",\n BackupMinimumEnforcedRetentionDuration = \"100000s\",\n Labels = \n {\n { \"foo\", \"bar1\" },\n { \"bar\", \"baz1\" },\n },\n Annotations = \n {\n { \"annotations1\", \"bar1\" },\n { \"annotations2\", \"baz1\" },\n },\n ForceUpdate = true,\n ForceDelete = true,\n AllowMissing = true,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/backupdisasterrecovery\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := backupdisasterrecovery.NewBackupVault(ctx, \"backup-vault-test\", \u0026backupdisasterrecovery.BackupVaultArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tBackupVaultId: pulumi.String(\"backup-vault-test\"),\n\t\t\tDescription: pulumi.String(\"This is a second backup vault built by Terraform.\"),\n\t\t\tBackupMinimumEnforcedRetentionDuration: pulumi.String(\"100000s\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar1\"),\n\t\t\t\t\"bar\": pulumi.String(\"baz1\"),\n\t\t\t},\n\t\t\tAnnotations: pulumi.StringMap{\n\t\t\t\t\"annotations1\": pulumi.String(\"bar1\"),\n\t\t\t\t\"annotations2\": pulumi.String(\"baz1\"),\n\t\t\t},\n\t\t\tForceUpdate: pulumi.Bool(true),\n\t\t\tForceDelete: pulumi.Bool(true),\n\t\t\tAllowMissing: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.backupdisasterrecovery.BackupVault;\nimport com.pulumi.gcp.backupdisasterrecovery.BackupVaultArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport 
java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var backup_vault_test = new BackupVault(\"backup-vault-test\", BackupVaultArgs.builder()\n .location(\"us-central1\")\n .backupVaultId(\"backup-vault-test\")\n .description(\"This is a second backup vault built by Terraform.\")\n .backupMinimumEnforcedRetentionDuration(\"100000s\")\n .labels(Map.ofEntries(\n Map.entry(\"foo\", \"bar1\"),\n Map.entry(\"bar\", \"baz1\")\n ))\n .annotations(Map.ofEntries(\n Map.entry(\"annotations1\", \"bar1\"),\n Map.entry(\"annotations2\", \"baz1\")\n ))\n .forceUpdate(true)\n .forceDelete(true)\n .allowMissing(true)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n backup-vault-test:\n type: gcp:backupdisasterrecovery:BackupVault\n properties:\n location: us-central1\n backupVaultId: backup-vault-test\n description: This is a second backup vault built by Terraform.\n backupMinimumEnforcedRetentionDuration: 100000s\n labels:\n foo: bar1\n bar: baz1\n annotations:\n annotations1: bar1\n annotations2: baz1\n forceUpdate: true\n forceDelete: true\n allowMissing: true\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nBackupVault can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}`\n\n* `{{project}}/{{location}}/{{backup_vault_id}}`\n\n* `{{location}}/{{backup_vault_id}}`\n\nWhen using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}\n```\n\n```sh\n$ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{project}}/{{location}}/{{backup_vault_id}}\n```\n\n```sh\n$ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{location}}/{{backup_vault_id}}\n```\n\n", + "properties": { + "allowMissing": { + "type": "boolean", + "description": "Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist.\n" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Optional. User annotations. See https://google.aip.dev/128#annotations\nStores small amounts of arbitrary data.\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.\n" + }, + "backupCount": { + "type": "string", + "description": "Output only. The number of backups in this backup vault.\n" + }, + "backupMinimumEnforcedRetentionDuration": { + "type": "string", + "description": "Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended.\n" + }, + "backupVaultId": { + "type": "string", + "description": "Required. ID of the requesting object.\n\n\n- - -\n" + }, + "createTime": { + "type": "string", + "description": "Output only. The time when the instance was created.\n" + }, + "deletable": { + "type": "boolean", + "description": "Output only. Set to true when there are no backups nested under this resource.\n" + }, + "description": { + "type": "string", + "description": "Optional. 
The description of the BackupVault instance (2048 characters or less).\n" + }, + "effectiveAnnotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "effectiveLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.\n", + "secret": true + }, + "effectiveTime": { + "type": "string", + "description": "Optional. Time after which the BackupVault resource is locked.\n" + }, + "etag": { + "type": "string", + "description": "Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwriting each other.\n" + }, + "forceDelete": { + "type": "boolean", + "description": "If set, the following restrictions against deletion of the backup vault instance can be overridden:\n* deletion of a backup vault instance containing no backups, but still containing empty datasources.\n* deletion of a backup vault instance that is being referenced by an active backup plan.\n" + }, + "forceUpdate": { + "type": "boolean", + "description": "If set, allow update to extend the minimum enforced retention for backup vault. This overrides\nthe restriction against conflicting retention periods. This conflict may occur when the\nexpiration schedule defined by the associated backup plan is shorter than the minimum\nretention set by the backup vault.\n" + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Resource labels to represent user provided metadata.\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.\n" + }, + "location": { + "type": "string", + "description": "The GCP location for the backup vault.\n" + }, + "name": { + "type": "string", + "description": "Output only. Identifier. The resource name.\n" + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs.\nIf it is not provided, the provider project is used.\n" + }, + "pulumiLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "The combination of labels configured directly on the resource\nand default labels configured on the provider.\n", + "secret": true + }, + "serviceAccount": { + "type": "string", + "description": "Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there.\n" + }, + "state": { + "type": "string", + "description": "Output only. The BackupVault resource instance state.\nPossible values:\nSTATE_UNSPECIFIED\nCREATING\nACTIVE\nDELETING\nERROR\n" + }, + "totalStoredBytes": { + "type": "string", + "description": "Output only. Total size of the storage used by all backup resources.\n" + }, + "uid": { + "type": "string", + "description": "Output only. Output only Immutable after resource creation until resource deletion.\n" + }, + "updateTime": { + "type": "string", + "description": "Output only. 
The time when the instance was updated.\n" + } + }, + "required": [ + "backupCount", + "backupMinimumEnforcedRetentionDuration", + "backupVaultId", + "createTime", + "deletable", + "effectiveAnnotations", + "effectiveLabels", + "etag", + "location", + "name", + "project", + "serviceAccount", + "state", + "pulumiLabels", + "totalStoredBytes", + "uid", + "updateTime" + ], + "inputProperties": { + "allowMissing": { + "type": "boolean", + "description": "Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist.\n" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Optional. User annotations. See https://google.aip.dev/128#annotations\nStores small amounts of arbitrary data.\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.\n" + }, + "backupMinimumEnforcedRetentionDuration": { + "type": "string", + "description": "Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended.\n" + }, + "backupVaultId": { + "type": "string", + "description": "Required. ID of the requesting object.\n\n\n- - -\n", + "willReplaceOnChanges": true + }, + "description": { + "type": "string", + "description": "Optional. The description of the BackupVault instance (2048 characters or less).\n" + }, + "effectiveTime": { + "type": "string", + "description": "Optional. Time after which the BackupVault resource is locked.\n" + }, + "forceDelete": { + "type": "boolean", + "description": "If set, the following restrictions against deletion of the backup vault instance can be overridden:\n* deletion of a backup vault instance containing no backups, but still containing empty datasources.\n* deletion of a backup vault instance that is being referenced by an active backup plan.\n" + }, + "forceUpdate": { + "type": "boolean", + "description": "If set, allow update to extend the minimum enforced retention for backup vault. This overrides\nthe restriction against conflicting retention periods. This conflict may occur when the\nexpiration schedule defined by the associated backup plan is shorter than the minimum\nretention set by the backup vault.\n" + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Resource labels to represent user provided metadata.\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.\n" + }, + "location": { + "type": "string", + "description": "The GCP location for the backup vault.\n", + "willReplaceOnChanges": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs.\nIf it is not provided, the provider project is used.\n", + "willReplaceOnChanges": true + } + }, + "requiredInputs": [ + "backupMinimumEnforcedRetentionDuration", + "backupVaultId", + "location" + ], + "stateInputs": { + "description": "Input properties used for looking up and filtering BackupVault resources.\n", + "properties": { + "allowMissing": { + "type": "boolean", + "description": "Allow idempotent deletion of backup vault. 
The request will still succeed in case the backup vault does not exist.\n" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Optional. User annotations. See https://google.aip.dev/128#annotations\nStores small amounts of arbitrary data.\n**Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.\nPlease refer to the field `effective_annotations` for all of the annotations present on the resource.\n" + }, + "backupCount": { + "type": "string", + "description": "Output only. The number of backups in this backup vault.\n" + }, + "backupMinimumEnforcedRetentionDuration": { + "type": "string", + "description": "Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended.\n" + }, + "backupVaultId": { + "type": "string", + "description": "Required. ID of the requesting object.\n\n\n- - -\n", + "willReplaceOnChanges": true + }, + "createTime": { + "type": "string", + "description": "Output only. The time when the instance was created.\n" + }, + "deletable": { + "type": "boolean", + "description": "Output only. Set to true when there are no backups nested under this resource.\n" + }, + "description": { + "type": "string", + "description": "Optional. The description of the BackupVault instance (2048 characters or less).\n" + }, + "effectiveAnnotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "effectiveLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services.\n", + "secret": true + }, + "effectiveTime": { + "type": "string", + "description": "Optional. Time after which the BackupVault resource is locked.\n" + }, + "etag": { + "type": "string", + "description": "Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwriting each other.\n" + }, + "forceDelete": { + "type": "boolean", + "description": "If set, the following restrictions against deletion of the backup vault instance can be overridden:\n* deletion of a backup vault instance containing no backups, but still containing empty datasources.\n* deletion of a backup vault instance that is being referenced by an active backup plan.\n" + }, + "forceUpdate": { + "type": "boolean", + "description": "If set, allow update to extend the minimum enforced retention for backup vault. This overrides\nthe restriction against conflicting retention periods. This conflict may occur when the\nexpiration schedule defined by the associated backup plan is shorter than the minimum\nretention set by the backup vault.\n" + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Optional. Resource labels to represent user provided metadata.\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.\n" + }, + "location": { + "type": "string", + "description": "The GCP location for the backup vault.\n", + "willReplaceOnChanges": true + }, + "name": { + "type": "string", + "description": "Output only. Identifier. 
The resource name.\n" + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs.\nIf it is not provided, the provider project is used.\n", + "willReplaceOnChanges": true + }, + "pulumiLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "The combination of labels configured directly on the resource\nand default labels configured on the provider.\n", + "secret": true + }, + "serviceAccount": { + "type": "string", + "description": "Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there.\n" + }, + "state": { + "type": "string", + "description": "Output only. The BackupVault resource instance state.\nPossible values:\nSTATE_UNSPECIFIED\nCREATING\nACTIVE\nDELETING\nERROR\n" + }, + "totalStoredBytes": { + "type": "string", + "description": "Output only. Total size of the storage used by all backup resources.\n" + }, + "uid": { + "type": "string", + "description": "Output only. Output only Immutable after resource creation until resource deletion.\n" + }, + "updateTime": { + "type": "string", + "description": "Output only. The time when the instance was updated.\n" + } + }, + "type": "object" + } + }, "gcp:backupdisasterrecovery/managementServer:ManagementServer": { "description": "## Example Usage\n\n### Backup Dr Management Server\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.compute.Network(\"default\", {name: \"vpc-network\"});\nconst privateIpAddress = new gcp.compute.GlobalAddress(\"private_ip_address\", {\n name: \"vpc-network\",\n addressType: \"INTERNAL\",\n purpose: \"VPC_PEERING\",\n prefixLength: 20,\n network: _default.id,\n});\nconst defaultConnection = new gcp.servicenetworking.Connection(\"default\", {\n network: _default.id,\n service: \"servicenetworking.googleapis.com\",\n reservedPeeringRanges: [privateIpAddress.name],\n});\nconst ms_console = new gcp.backupdisasterrecovery.ManagementServer(\"ms-console\", {\n location: \"us-central1\",\n name: \"ms-console\",\n type: \"BACKUP_RESTORE\",\n networks: [{\n network: _default.id,\n peeringMode: \"PRIVATE_SERVICE_ACCESS\",\n }],\n}, {\n dependsOn: [defaultConnection],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.compute.Network(\"default\", name=\"vpc-network\")\nprivate_ip_address = gcp.compute.GlobalAddress(\"private_ip_address\",\n name=\"vpc-network\",\n address_type=\"INTERNAL\",\n purpose=\"VPC_PEERING\",\n prefix_length=20,\n network=default.id)\ndefault_connection = gcp.servicenetworking.Connection(\"default\",\n network=default.id,\n service=\"servicenetworking.googleapis.com\",\n reserved_peering_ranges=[private_ip_address.name])\nms_console = gcp.backupdisasterrecovery.ManagementServer(\"ms-console\",\n location=\"us-central1\",\n name=\"ms-console\",\n type=\"BACKUP_RESTORE\",\n networks=[{\n \"network\": default.id,\n \"peering_mode\": \"PRIVATE_SERVICE_ACCESS\",\n }],\n opts = pulumi.ResourceOptions(depends_on=[default_connection]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.Compute.Network(\"default\", new()\n {\n Name = \"vpc-network\",\n });\n\n var 
privateIpAddress = new Gcp.Compute.GlobalAddress(\"private_ip_address\", new()\n {\n Name = \"vpc-network\",\n AddressType = \"INTERNAL\",\n Purpose = \"VPC_PEERING\",\n PrefixLength = 20,\n Network = @default.Id,\n });\n\n var defaultConnection = new Gcp.ServiceNetworking.Connection(\"default\", new()\n {\n Network = @default.Id,\n Service = \"servicenetworking.googleapis.com\",\n ReservedPeeringRanges = new[]\n {\n privateIpAddress.Name,\n },\n });\n\n var ms_console = new Gcp.BackupDisasterRecovery.ManagementServer(\"ms-console\", new()\n {\n Location = \"us-central1\",\n Name = \"ms-console\",\n Type = \"BACKUP_RESTORE\",\n Networks = new[]\n {\n new Gcp.BackupDisasterRecovery.Inputs.ManagementServerNetworkArgs\n {\n Network = @default.Id,\n PeeringMode = \"PRIVATE_SERVICE_ACCESS\",\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n defaultConnection,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/backupdisasterrecovery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/servicenetworking\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewNetwork(ctx, \"default\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"vpc-network\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprivateIpAddress, err := compute.NewGlobalAddress(ctx, \"private_ip_address\", \u0026compute.GlobalAddressArgs{\n\t\t\tName: pulumi.String(\"vpc-network\"),\n\t\t\tAddressType: pulumi.String(\"INTERNAL\"),\n\t\t\tPurpose: pulumi.String(\"VPC_PEERING\"),\n\t\t\tPrefixLength: pulumi.Int(20),\n\t\t\tNetwork: _default.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultConnection, err := servicenetworking.NewConnection(ctx, \"default\", \u0026servicenetworking.ConnectionArgs{\n\t\t\tNetwork: _default.ID(),\n\t\t\tService: pulumi.String(\"servicenetworking.googleapis.com\"),\n\t\t\tReservedPeeringRanges: pulumi.StringArray{\n\t\t\t\tprivateIpAddress.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = backupdisasterrecovery.NewManagementServer(ctx, \"ms-console\", \u0026backupdisasterrecovery.ManagementServerArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tName: pulumi.String(\"ms-console\"),\n\t\t\tType: pulumi.String(\"BACKUP_RESTORE\"),\n\t\t\tNetworks: backupdisasterrecovery.ManagementServerNetworkArray{\n\t\t\t\t\u0026backupdisasterrecovery.ManagementServerNetworkArgs{\n\t\t\t\t\tNetwork: _default.ID(),\n\t\t\t\t\tPeeringMode: pulumi.String(\"PRIVATE_SERVICE_ACCESS\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tdefaultConnection,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.GlobalAddress;\nimport com.pulumi.gcp.compute.GlobalAddressArgs;\nimport com.pulumi.gcp.servicenetworking.Connection;\nimport com.pulumi.gcp.servicenetworking.ConnectionArgs;\nimport com.pulumi.gcp.backupdisasterrecovery.ManagementServer;\nimport com.pulumi.gcp.backupdisasterrecovery.ManagementServerArgs;\nimport com.pulumi.gcp.backupdisasterrecovery.inputs.ManagementServerNetworkArgs;\nimport 
com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Network(\"default\", NetworkArgs.builder()\n .name(\"vpc-network\")\n .build());\n\n var privateIpAddress = new GlobalAddress(\"privateIpAddress\", GlobalAddressArgs.builder()\n .name(\"vpc-network\")\n .addressType(\"INTERNAL\")\n .purpose(\"VPC_PEERING\")\n .prefixLength(20)\n .network(default_.id())\n .build());\n\n var defaultConnection = new Connection(\"defaultConnection\", ConnectionArgs.builder()\n .network(default_.id())\n .service(\"servicenetworking.googleapis.com\")\n .reservedPeeringRanges(privateIpAddress.name())\n .build());\n\n var ms_console = new ManagementServer(\"ms-console\", ManagementServerArgs.builder()\n .location(\"us-central1\")\n .name(\"ms-console\")\n .type(\"BACKUP_RESTORE\")\n .networks(ManagementServerNetworkArgs.builder()\n .network(default_.id())\n .peeringMode(\"PRIVATE_SERVICE_ACCESS\")\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(defaultConnection)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:compute:Network\n properties:\n name: vpc-network\n privateIpAddress:\n type: gcp:compute:GlobalAddress\n name: private_ip_address\n properties:\n name: vpc-network\n addressType: INTERNAL\n purpose: VPC_PEERING\n prefixLength: 20\n network: ${default.id}\n defaultConnection:\n type: gcp:servicenetworking:Connection\n name: default\n properties:\n network: ${default.id}\n service: servicenetworking.googleapis.com\n reservedPeeringRanges:\n - ${privateIpAddress.name}\n ms-console:\n type: gcp:backupdisasterrecovery:ManagementServer\n properties:\n location: us-central1\n name: ms-console\n type: BACKUP_RESTORE\n networks:\n - network: ${default.id}\n peeringMode: PRIVATE_SERVICE_ACCESS\n options:\n dependson:\n - ${defaultConnection}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nManagementServer can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/managementServers/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, ManagementServer can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:backupdisasterrecovery/managementServer:ManagementServer default projects/{{project}}/locations/{{location}}/managementServers/{{name}}\n```\n\n```sh\n$ pulumi import gcp:backupdisasterrecovery/managementServer:ManagementServer default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:backupdisasterrecovery/managementServer:ManagementServer default {{location}}/{{name}}\n```\n\n", "properties": { @@ -124720,7 +126209,7 @@ } }, "gcp:bigquery/dataTransferConfig:DataTransferConfig": { - "description": "Represents a data transfer configuration. 
A transfer configuration\ncontains all metadata needed to perform a data transfer.\n\n\nTo get more information about Config, see:\n\n* [API documentation](https://cloud.google.com/bigquery/docs/reference/datatransfer/rest/v1/projects.locations.transferConfigs/create)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/bigquery/docs/reference/datatransfer/rest/)\n\n\n\n## Example Usage\n\n### Bigquerydatatransfer Config Scheduled Query\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst permissions = new gcp.projects.IAMMember(\"permissions\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/iam.serviceAccountTokenCreator\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com`),\n});\nconst myDataset = new gcp.bigquery.Dataset(\"my_dataset\", {\n datasetId: \"my_dataset\",\n friendlyName: \"foo\",\n description: \"bar\",\n location: \"asia-northeast1\",\n}, {\n dependsOn: [permissions],\n});\nconst queryConfig = new gcp.bigquery.DataTransferConfig(\"query_config\", {\n displayName: \"my-query\",\n location: \"asia-northeast1\",\n dataSourceId: \"scheduled_query\",\n schedule: \"first sunday of quarter 00:00\",\n destinationDatasetId: myDataset.datasetId,\n params: {\n destination_table_name_template: \"my_table\",\n write_disposition: \"WRITE_APPEND\",\n query: \"SELECT name FROM tabl WHERE x = 'y'\",\n },\n}, {\n dependsOn: [permissions],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\npermissions = gcp.projects.IAMMember(\"permissions\",\n project=project.project_id,\n role=\"roles/iam.serviceAccountTokenCreator\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\")\nmy_dataset = gcp.bigquery.Dataset(\"my_dataset\",\n dataset_id=\"my_dataset\",\n friendly_name=\"foo\",\n description=\"bar\",\n location=\"asia-northeast1\",\n opts = pulumi.ResourceOptions(depends_on=[permissions]))\nquery_config = gcp.bigquery.DataTransferConfig(\"query_config\",\n display_name=\"my-query\",\n location=\"asia-northeast1\",\n data_source_id=\"scheduled_query\",\n schedule=\"first sunday of quarter 00:00\",\n destination_dataset_id=my_dataset.dataset_id,\n params={\n \"destination_table_name_template\": \"my_table\",\n \"write_disposition\": \"WRITE_APPEND\",\n \"query\": \"SELECT name FROM tabl WHERE x = 'y'\",\n },\n opts = pulumi.ResourceOptions(depends_on=[permissions]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var permissions = new Gcp.Projects.IAMMember(\"permissions\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/iam.serviceAccountTokenCreator\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\",\n });\n\n var myDataset = new Gcp.BigQuery.Dataset(\"my_dataset\", new()\n {\n DatasetId = \"my_dataset\",\n FriendlyName = \"foo\",\n Description = \"bar\",\n Location = \"asia-northeast1\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n permissions,\n },\n });\n\n 
var queryConfig = new Gcp.BigQuery.DataTransferConfig(\"query_config\", new()\n {\n DisplayName = \"my-query\",\n Location = \"asia-northeast1\",\n DataSourceId = \"scheduled_query\",\n Schedule = \"first sunday of quarter 00:00\",\n DestinationDatasetId = myDataset.DatasetId,\n Params = \n {\n { \"destination_table_name_template\", \"my_table\" },\n { \"write_disposition\", \"WRITE_APPEND\" },\n { \"query\", \"SELECT name FROM tabl WHERE x = 'y'\" },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n permissions,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpermissions, err := projects.NewIAMMember(ctx, \"permissions\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/iam.serviceAccountTokenCreator\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmyDataset, err := bigquery.NewDataset(ctx, \"my_dataset\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_dataset\"),\n\t\t\tFriendlyName: pulumi.String(\"foo\"),\n\t\t\tDescription: pulumi.String(\"bar\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tpermissions,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigquery.NewDataTransferConfig(ctx, \"query_config\", \u0026bigquery.DataTransferConfigArgs{\n\t\t\tDisplayName: pulumi.String(\"my-query\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t\tDataSourceId: pulumi.String(\"scheduled_query\"),\n\t\t\tSchedule: pulumi.String(\"first sunday of quarter 00:00\"),\n\t\t\tDestinationDatasetId: myDataset.DatasetId,\n\t\t\tParams: pulumi.StringMap{\n\t\t\t\t\"destination_table_name_template\": pulumi.String(\"my_table\"),\n\t\t\t\t\"write_disposition\": pulumi.String(\"WRITE_APPEND\"),\n\t\t\t\t\"query\": pulumi.String(\"SELECT name FROM tabl WHERE x = 'y'\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tpermissions,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.DataTransferConfig;\nimport com.pulumi.gcp.bigquery.DataTransferConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var 
permissions = new IAMMember(\"permissions\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/iam.serviceAccountTokenCreator\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var myDataset = new Dataset(\"myDataset\", DatasetArgs.builder()\n .datasetId(\"my_dataset\")\n .friendlyName(\"foo\")\n .description(\"bar\")\n .location(\"asia-northeast1\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(permissions)\n .build());\n\n var queryConfig = new DataTransferConfig(\"queryConfig\", DataTransferConfigArgs.builder()\n .displayName(\"my-query\")\n .location(\"asia-northeast1\")\n .dataSourceId(\"scheduled_query\")\n .schedule(\"first sunday of quarter 00:00\")\n .destinationDatasetId(myDataset.datasetId())\n .params(Map.ofEntries(\n Map.entry(\"destination_table_name_template\", \"my_table\"),\n Map.entry(\"write_disposition\", \"WRITE_APPEND\"),\n Map.entry(\"query\", \"SELECT name FROM tabl WHERE x = 'y'\")\n ))\n .build(), CustomResourceOptions.builder()\n .dependsOn(permissions)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n permissions:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/iam.serviceAccountTokenCreator\n member: serviceAccount:service-${project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\n queryConfig:\n type: gcp:bigquery:DataTransferConfig\n name: query_config\n properties:\n displayName: my-query\n location: asia-northeast1\n dataSourceId: scheduled_query\n schedule: first sunday of quarter 00:00\n destinationDatasetId: ${myDataset.datasetId}\n params:\n destination_table_name_template: my_table\n write_disposition: WRITE_APPEND\n query: SELECT name FROM tabl WHERE x = 'y'\n options:\n dependson:\n - ${permissions}\n myDataset:\n type: gcp:bigquery:Dataset\n name: my_dataset\n properties:\n datasetId: my_dataset\n friendlyName: foo\n description: bar\n location: asia-northeast1\n options:\n dependson:\n - ${permissions}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Bigquerydatatransfer Config Salesforce\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst myDataset = new gcp.bigquery.Dataset(\"my_dataset\", {\n datasetId: \"my_dataset\",\n description: \"My dataset\",\n location: \"asia-northeast1\",\n});\nconst salesforceConfig = new gcp.bigquery.DataTransferConfig(\"salesforce_config\", {\n displayName: \"my-salesforce-config\",\n location: \"asia-northeast1\",\n dataSourceId: \"salesforce\",\n schedule: \"first sunday of quarter 00:00\",\n destinationDatasetId: myDataset.datasetId,\n params: {\n \"connector.authentication.oauth.clientId\": \"client-id\",\n \"connector.authentication.oauth.clientSecret\": \"client-secret\",\n \"connector.authentication.username\": \"username\",\n \"connector.authentication.password\": \"password\",\n \"connector.authentication.securityToken\": \"security-token\",\n assets: \"[\\\"asset-a\\\",\\\"asset-b\\\"]\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\nmy_dataset = gcp.bigquery.Dataset(\"my_dataset\",\n 
dataset_id=\"my_dataset\",\n description=\"My dataset\",\n location=\"asia-northeast1\")\nsalesforce_config = gcp.bigquery.DataTransferConfig(\"salesforce_config\",\n display_name=\"my-salesforce-config\",\n location=\"asia-northeast1\",\n data_source_id=\"salesforce\",\n schedule=\"first sunday of quarter 00:00\",\n destination_dataset_id=my_dataset.dataset_id,\n params={\n \"connector.authentication.oauth.clientId\": \"client-id\",\n \"connector.authentication.oauth.clientSecret\": \"client-secret\",\n \"connector.authentication.username\": \"username\",\n \"connector.authentication.password\": \"password\",\n \"connector.authentication.securityToken\": \"security-token\",\n \"assets\": \"[\\\"asset-a\\\",\\\"asset-b\\\"]\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var myDataset = new Gcp.BigQuery.Dataset(\"my_dataset\", new()\n {\n DatasetId = \"my_dataset\",\n Description = \"My dataset\",\n Location = \"asia-northeast1\",\n });\n\n var salesforceConfig = new Gcp.BigQuery.DataTransferConfig(\"salesforce_config\", new()\n {\n DisplayName = \"my-salesforce-config\",\n Location = \"asia-northeast1\",\n DataSourceId = \"salesforce\",\n Schedule = \"first sunday of quarter 00:00\",\n DestinationDatasetId = myDataset.DatasetId,\n Params = \n {\n { \"connector.authentication.oauth.clientId\", \"client-id\" },\n { \"connector.authentication.oauth.clientSecret\", \"client-secret\" },\n { \"connector.authentication.username\", \"username\" },\n { \"connector.authentication.password\", \"password\" },\n { \"connector.authentication.securityToken\", \"security-token\" },\n { \"assets\", \"[\\\"asset-a\\\",\\\"asset-b\\\"]\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmyDataset, err := bigquery.NewDataset(ctx, \"my_dataset\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_dataset\"),\n\t\t\tDescription: pulumi.String(\"My dataset\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigquery.NewDataTransferConfig(ctx, \"salesforce_config\", \u0026bigquery.DataTransferConfigArgs{\n\t\t\tDisplayName: pulumi.String(\"my-salesforce-config\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t\tDataSourceId: pulumi.String(\"salesforce\"),\n\t\t\tSchedule: pulumi.String(\"first sunday of quarter 00:00\"),\n\t\t\tDestinationDatasetId: myDataset.DatasetId,\n\t\t\tParams: pulumi.StringMap{\n\t\t\t\t\"connector.authentication.oauth.clientId\": pulumi.String(\"client-id\"),\n\t\t\t\t\"connector.authentication.oauth.clientSecret\": pulumi.String(\"client-secret\"),\n\t\t\t\t\"connector.authentication.username\": pulumi.String(\"username\"),\n\t\t\t\t\"connector.authentication.password\": pulumi.String(\"password\"),\n\t\t\t\t\"connector.authentication.securityToken\": pulumi.String(\"security-token\"),\n\t\t\t\t\"assets\": pulumi.String(\"[\\\"asset-a\\\",\\\"asset-b\\\"]\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn 
nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.DataTransferConfig;\nimport com.pulumi.gcp.bigquery.DataTransferConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var myDataset = new Dataset(\"myDataset\", DatasetArgs.builder()\n .datasetId(\"my_dataset\")\n .description(\"My dataset\")\n .location(\"asia-northeast1\")\n .build());\n\n var salesforceConfig = new DataTransferConfig(\"salesforceConfig\", DataTransferConfigArgs.builder()\n .displayName(\"my-salesforce-config\")\n .location(\"asia-northeast1\")\n .dataSourceId(\"salesforce\")\n .schedule(\"first sunday of quarter 00:00\")\n .destinationDatasetId(myDataset.datasetId())\n .params(Map.ofEntries(\n Map.entry(\"connector.authentication.oauth.clientId\", \"client-id\"),\n Map.entry(\"connector.authentication.oauth.clientSecret\", \"client-secret\"),\n Map.entry(\"connector.authentication.username\", \"username\"),\n Map.entry(\"connector.authentication.password\", \"password\"),\n Map.entry(\"connector.authentication.securityToken\", \"security-token\"),\n Map.entry(\"assets\", \"[\\\"asset-a\\\",\\\"asset-b\\\"]\")\n ))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myDataset:\n type: gcp:bigquery:Dataset\n name: my_dataset\n properties:\n datasetId: my_dataset\n description: My dataset\n location: asia-northeast1\n salesforceConfig:\n type: gcp:bigquery:DataTransferConfig\n name: salesforce_config\n properties:\n displayName: my-salesforce-config\n location: asia-northeast1\n dataSourceId: salesforce\n schedule: first sunday of quarter 00:00\n destinationDatasetId: ${myDataset.datasetId}\n params:\n connector.authentication.oauth.clientId: client-id\n connector.authentication.oauth.clientSecret: client-secret\n connector.authentication.username: username\n connector.authentication.password: password\n connector.authentication.securityToken: security-token\n assets: '[\"asset-a\",\"asset-b\"]'\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nConfig can be imported using any of these accepted formats:\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, Config can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:bigquery/dataTransferConfig:DataTransferConfig default {{name}}\n```\n\n", + "description": "Represents a data transfer configuration. 
A transfer configuration\ncontains all metadata needed to perform a data transfer.\n\n\nTo get more information about Config, see:\n\n* [API documentation](https://cloud.google.com/bigquery/docs/reference/datatransfer/rest/v1/projects.locations.transferConfigs/create)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/bigquery/docs/reference/datatransfer/rest/)\n\n\n\n## Example Usage\n\n### Bigquerydatatransfer Config Scheduled Query\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst permissions = new gcp.projects.IAMMember(\"permissions\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/iam.serviceAccountTokenCreator\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com`),\n});\nconst myDataset = new gcp.bigquery.Dataset(\"my_dataset\", {\n datasetId: \"my_dataset\",\n friendlyName: \"foo\",\n description: \"bar\",\n location: \"asia-northeast1\",\n}, {\n dependsOn: [permissions],\n});\nconst queryConfig = new gcp.bigquery.DataTransferConfig(\"query_config\", {\n displayName: \"my-query\",\n location: \"asia-northeast1\",\n dataSourceId: \"scheduled_query\",\n schedule: \"first sunday of quarter 00:00\",\n destinationDatasetId: myDataset.datasetId,\n params: {\n destination_table_name_template: \"my_table\",\n write_disposition: \"WRITE_APPEND\",\n query: \"SELECT name FROM tabl WHERE x = 'y'\",\n },\n}, {\n dependsOn: [permissions],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\npermissions = gcp.projects.IAMMember(\"permissions\",\n project=project.project_id,\n role=\"roles/iam.serviceAccountTokenCreator\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\")\nmy_dataset = gcp.bigquery.Dataset(\"my_dataset\",\n dataset_id=\"my_dataset\",\n friendly_name=\"foo\",\n description=\"bar\",\n location=\"asia-northeast1\",\n opts = pulumi.ResourceOptions(depends_on=[permissions]))\nquery_config = gcp.bigquery.DataTransferConfig(\"query_config\",\n display_name=\"my-query\",\n location=\"asia-northeast1\",\n data_source_id=\"scheduled_query\",\n schedule=\"first sunday of quarter 00:00\",\n destination_dataset_id=my_dataset.dataset_id,\n params={\n \"destination_table_name_template\": \"my_table\",\n \"write_disposition\": \"WRITE_APPEND\",\n \"query\": \"SELECT name FROM tabl WHERE x = 'y'\",\n },\n opts = pulumi.ResourceOptions(depends_on=[permissions]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var permissions = new Gcp.Projects.IAMMember(\"permissions\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/iam.serviceAccountTokenCreator\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\",\n });\n\n var myDataset = new Gcp.BigQuery.Dataset(\"my_dataset\", new()\n {\n DatasetId = \"my_dataset\",\n FriendlyName = \"foo\",\n Description = \"bar\",\n Location = \"asia-northeast1\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n permissions,\n },\n });\n\n 
var queryConfig = new Gcp.BigQuery.DataTransferConfig(\"query_config\", new()\n {\n DisplayName = \"my-query\",\n Location = \"asia-northeast1\",\n DataSourceId = \"scheduled_query\",\n Schedule = \"first sunday of quarter 00:00\",\n DestinationDatasetId = myDataset.DatasetId,\n Params = \n {\n { \"destination_table_name_template\", \"my_table\" },\n { \"write_disposition\", \"WRITE_APPEND\" },\n { \"query\", \"SELECT name FROM tabl WHERE x = 'y'\" },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n permissions,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpermissions, err := projects.NewIAMMember(ctx, \"permissions\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/iam.serviceAccountTokenCreator\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmyDataset, err := bigquery.NewDataset(ctx, \"my_dataset\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_dataset\"),\n\t\t\tFriendlyName: pulumi.String(\"foo\"),\n\t\t\tDescription: pulumi.String(\"bar\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tpermissions,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigquery.NewDataTransferConfig(ctx, \"query_config\", \u0026bigquery.DataTransferConfigArgs{\n\t\t\tDisplayName: pulumi.String(\"my-query\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t\tDataSourceId: pulumi.String(\"scheduled_query\"),\n\t\t\tSchedule: pulumi.String(\"first sunday of quarter 00:00\"),\n\t\t\tDestinationDatasetId: myDataset.DatasetId,\n\t\t\tParams: pulumi.StringMap{\n\t\t\t\t\"destination_table_name_template\": pulumi.String(\"my_table\"),\n\t\t\t\t\"write_disposition\": pulumi.String(\"WRITE_APPEND\"),\n\t\t\t\t\"query\": pulumi.String(\"SELECT name FROM tabl WHERE x = 'y'\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tpermissions,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.DataTransferConfig;\nimport com.pulumi.gcp.bigquery.DataTransferConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var 
permissions = new IAMMember(\"permissions\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/iam.serviceAccountTokenCreator\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var myDataset = new Dataset(\"myDataset\", DatasetArgs.builder()\n .datasetId(\"my_dataset\")\n .friendlyName(\"foo\")\n .description(\"bar\")\n .location(\"asia-northeast1\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(permissions)\n .build());\n\n var queryConfig = new DataTransferConfig(\"queryConfig\", DataTransferConfigArgs.builder()\n .displayName(\"my-query\")\n .location(\"asia-northeast1\")\n .dataSourceId(\"scheduled_query\")\n .schedule(\"first sunday of quarter 00:00\")\n .destinationDatasetId(myDataset.datasetId())\n .params(Map.ofEntries(\n Map.entry(\"destination_table_name_template\", \"my_table\"),\n Map.entry(\"write_disposition\", \"WRITE_APPEND\"),\n Map.entry(\"query\", \"SELECT name FROM tabl WHERE x = 'y'\")\n ))\n .build(), CustomResourceOptions.builder()\n .dependsOn(permissions)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n permissions:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/iam.serviceAccountTokenCreator\n member: serviceAccount:service-${project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\n queryConfig:\n type: gcp:bigquery:DataTransferConfig\n name: query_config\n properties:\n displayName: my-query\n location: asia-northeast1\n dataSourceId: scheduled_query\n schedule: first sunday of quarter 00:00\n destinationDatasetId: ${myDataset.datasetId}\n params:\n destination_table_name_template: my_table\n write_disposition: WRITE_APPEND\n query: SELECT name FROM tabl WHERE x = 'y'\n options:\n dependson:\n - ${permissions}\n myDataset:\n type: gcp:bigquery:Dataset\n name: my_dataset\n properties:\n datasetId: my_dataset\n friendlyName: foo\n description: bar\n location: asia-northeast1\n options:\n dependson:\n - ${permissions}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Bigquerydatatransfer Config Cmek\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst permissions = new gcp.projects.IAMMember(\"permissions\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/iam.serviceAccountTokenCreator\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com`),\n});\nconst myDataset = new gcp.bigquery.Dataset(\"my_dataset\", {\n datasetId: \"example_dataset\",\n friendlyName: \"foo\",\n description: \"bar\",\n location: \"asia-northeast1\",\n}, {\n dependsOn: [permissions],\n});\nconst keyRing = new gcp.kms.KeyRing(\"key_ring\", {\n name: \"example-keyring\",\n location: \"us\",\n});\nconst cryptoKey = new gcp.kms.CryptoKey(\"crypto_key\", {\n name: \"example-key\",\n keyRing: keyRing.id,\n});\nconst queryConfigCmek = new gcp.bigquery.DataTransferConfig(\"query_config_cmek\", {\n displayName: \"\",\n location: \"asia-northeast1\",\n dataSourceId: \"scheduled_query\",\n schedule: \"first sunday of quarter 00:00\",\n destinationDatasetId: 
myDataset.datasetId,\n params: {\n destination_table_name_template: \"my_table\",\n write_disposition: \"WRITE_APPEND\",\n query: \"SELECT name FROM tabl WHERE x = 'y'\",\n },\n encryptionConfiguration: {\n kmsKeyName: cryptoKey.id,\n },\n}, {\n dependsOn: [permissions],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\npermissions = gcp.projects.IAMMember(\"permissions\",\n project=project.project_id,\n role=\"roles/iam.serviceAccountTokenCreator\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\")\nmy_dataset = gcp.bigquery.Dataset(\"my_dataset\",\n dataset_id=\"example_dataset\",\n friendly_name=\"foo\",\n description=\"bar\",\n location=\"asia-northeast1\",\n opts = pulumi.ResourceOptions(depends_on=[permissions]))\nkey_ring = gcp.kms.KeyRing(\"key_ring\",\n name=\"example-keyring\",\n location=\"us\")\ncrypto_key = gcp.kms.CryptoKey(\"crypto_key\",\n name=\"example-key\",\n key_ring=key_ring.id)\nquery_config_cmek = gcp.bigquery.DataTransferConfig(\"query_config_cmek\",\n display_name=\"\",\n location=\"asia-northeast1\",\n data_source_id=\"scheduled_query\",\n schedule=\"first sunday of quarter 00:00\",\n destination_dataset_id=my_dataset.dataset_id,\n params={\n \"destination_table_name_template\": \"my_table\",\n \"write_disposition\": \"WRITE_APPEND\",\n \"query\": \"SELECT name FROM tabl WHERE x = 'y'\",\n },\n encryption_configuration={\n \"kms_key_name\": crypto_key.id,\n },\n opts = pulumi.ResourceOptions(depends_on=[permissions]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var permissions = new Gcp.Projects.IAMMember(\"permissions\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/iam.serviceAccountTokenCreator\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\",\n });\n\n var myDataset = new Gcp.BigQuery.Dataset(\"my_dataset\", new()\n {\n DatasetId = \"example_dataset\",\n FriendlyName = \"foo\",\n Description = \"bar\",\n Location = \"asia-northeast1\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n permissions,\n },\n });\n\n var keyRing = new Gcp.Kms.KeyRing(\"key_ring\", new()\n {\n Name = \"example-keyring\",\n Location = \"us\",\n });\n\n var cryptoKey = new Gcp.Kms.CryptoKey(\"crypto_key\", new()\n {\n Name = \"example-key\",\n KeyRing = keyRing.Id,\n });\n\n var queryConfigCmek = new Gcp.BigQuery.DataTransferConfig(\"query_config_cmek\", new()\n {\n DisplayName = \"\",\n Location = \"asia-northeast1\",\n DataSourceId = \"scheduled_query\",\n Schedule = \"first sunday of quarter 00:00\",\n DestinationDatasetId = myDataset.DatasetId,\n Params = \n {\n { \"destination_table_name_template\", \"my_table\" },\n { \"write_disposition\", \"WRITE_APPEND\" },\n { \"query\", \"SELECT name FROM tabl WHERE x = 'y'\" },\n },\n EncryptionConfiguration = new Gcp.BigQuery.Inputs.DataTransferConfigEncryptionConfigurationArgs\n {\n KmsKeyName = cryptoKey.Id,\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n permissions,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpermissions, err := projects.NewIAMMember(ctx, \"permissions\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/iam.serviceAccountTokenCreator\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmyDataset, err := bigquery.NewDataset(ctx, \"my_dataset\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"example_dataset\"),\n\t\t\tFriendlyName: pulumi.String(\"foo\"),\n\t\t\tDescription: pulumi.String(\"bar\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tpermissions,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkeyRing, err := kms.NewKeyRing(ctx, \"key_ring\", \u0026kms.KeyRingArgs{\n\t\t\tName: pulumi.String(\"example-keyring\"),\n\t\t\tLocation: pulumi.String(\"us\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcryptoKey, err := kms.NewCryptoKey(ctx, \"crypto_key\", \u0026kms.CryptoKeyArgs{\n\t\t\tName: pulumi.String(\"example-key\"),\n\t\t\tKeyRing: keyRing.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigquery.NewDataTransferConfig(ctx, \"query_config_cmek\", \u0026bigquery.DataTransferConfigArgs{\n\t\t\tDisplayName: pulumi.String(\"\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t\tDataSourceId: pulumi.String(\"scheduled_query\"),\n\t\t\tSchedule: pulumi.String(\"first sunday of quarter 00:00\"),\n\t\t\tDestinationDatasetId: myDataset.DatasetId,\n\t\t\tParams: pulumi.StringMap{\n\t\t\t\t\"destination_table_name_template\": pulumi.String(\"my_table\"),\n\t\t\t\t\"write_disposition\": pulumi.String(\"WRITE_APPEND\"),\n\t\t\t\t\"query\": pulumi.String(\"SELECT name FROM tabl WHERE x = 'y'\"),\n\t\t\t},\n\t\t\tEncryptionConfiguration: \u0026bigquery.DataTransferConfigEncryptionConfigurationArgs{\n\t\t\t\tKmsKeyName: cryptoKey.ID(),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tpermissions,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.kms.KeyRing;\nimport com.pulumi.gcp.kms.KeyRingArgs;\nimport com.pulumi.gcp.kms.CryptoKey;\nimport com.pulumi.gcp.kms.CryptoKeyArgs;\nimport com.pulumi.gcp.bigquery.DataTransferConfig;\nimport com.pulumi.gcp.bigquery.DataTransferConfigArgs;\nimport com.pulumi.gcp.bigquery.inputs.DataTransferConfigEncryptionConfigurationArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport 
java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var permissions = new IAMMember(\"permissions\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/iam.serviceAccountTokenCreator\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var myDataset = new Dataset(\"myDataset\", DatasetArgs.builder()\n .datasetId(\"example_dataset\")\n .friendlyName(\"foo\")\n .description(\"bar\")\n .location(\"asia-northeast1\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(permissions)\n .build());\n\n var keyRing = new KeyRing(\"keyRing\", KeyRingArgs.builder()\n .name(\"example-keyring\")\n .location(\"us\")\n .build());\n\n var cryptoKey = new CryptoKey(\"cryptoKey\", CryptoKeyArgs.builder()\n .name(\"example-key\")\n .keyRing(keyRing.id())\n .build());\n\n var queryConfigCmek = new DataTransferConfig(\"queryConfigCmek\", DataTransferConfigArgs.builder()\n .displayName(\"\")\n .location(\"asia-northeast1\")\n .dataSourceId(\"scheduled_query\")\n .schedule(\"first sunday of quarter 00:00\")\n .destinationDatasetId(myDataset.datasetId())\n .params(Map.ofEntries(\n Map.entry(\"destination_table_name_template\", \"my_table\"),\n Map.entry(\"write_disposition\", \"WRITE_APPEND\"),\n Map.entry(\"query\", \"SELECT name FROM tabl WHERE x = 'y'\")\n ))\n .encryptionConfiguration(DataTransferConfigEncryptionConfigurationArgs.builder()\n .kmsKeyName(cryptoKey.id())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(permissions)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n permissions:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/iam.serviceAccountTokenCreator\n member: serviceAccount:service-${project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com\n queryConfigCmek:\n type: gcp:bigquery:DataTransferConfig\n name: query_config_cmek\n properties:\n displayName:\n location: asia-northeast1\n dataSourceId: scheduled_query\n schedule: first sunday of quarter 00:00\n destinationDatasetId: ${myDataset.datasetId}\n params:\n destination_table_name_template: my_table\n write_disposition: WRITE_APPEND\n query: SELECT name FROM tabl WHERE x = 'y'\n encryptionConfiguration:\n kmsKeyName: ${cryptoKey.id}\n options:\n dependson:\n - ${permissions}\n myDataset:\n type: gcp:bigquery:Dataset\n name: my_dataset\n properties:\n datasetId: example_dataset\n friendlyName: foo\n description: bar\n location: asia-northeast1\n options:\n dependson:\n - ${permissions}\n cryptoKey:\n type: gcp:kms:CryptoKey\n name: crypto_key\n properties:\n name: example-key\n keyRing: ${keyRing.id}\n keyRing:\n type: gcp:kms:KeyRing\n name: key_ring\n properties:\n name: example-keyring\n location: us\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Bigquerydatatransfer Config Salesforce\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst myDataset = new 
gcp.bigquery.Dataset(\"my_dataset\", {\n datasetId: \"my_dataset\",\n description: \"My dataset\",\n location: \"asia-northeast1\",\n});\nconst salesforceConfig = new gcp.bigquery.DataTransferConfig(\"salesforce_config\", {\n displayName: \"my-salesforce-config\",\n location: \"asia-northeast1\",\n dataSourceId: \"salesforce\",\n schedule: \"first sunday of quarter 00:00\",\n destinationDatasetId: myDataset.datasetId,\n params: {\n \"connector.authentication.oauth.clientId\": \"client-id\",\n \"connector.authentication.oauth.clientSecret\": \"client-secret\",\n \"connector.authentication.oauth.myDomain\": \"MyDomainName\",\n assets: \"[\\\"asset-a\\\",\\\"asset-b\\\"]\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\nmy_dataset = gcp.bigquery.Dataset(\"my_dataset\",\n dataset_id=\"my_dataset\",\n description=\"My dataset\",\n location=\"asia-northeast1\")\nsalesforce_config = gcp.bigquery.DataTransferConfig(\"salesforce_config\",\n display_name=\"my-salesforce-config\",\n location=\"asia-northeast1\",\n data_source_id=\"salesforce\",\n schedule=\"first sunday of quarter 00:00\",\n destination_dataset_id=my_dataset.dataset_id,\n params={\n \"connector.authentication.oauth.clientId\": \"client-id\",\n \"connector.authentication.oauth.clientSecret\": \"client-secret\",\n \"connector.authentication.oauth.myDomain\": \"MyDomainName\",\n \"assets\": \"[\\\"asset-a\\\",\\\"asset-b\\\"]\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var myDataset = new Gcp.BigQuery.Dataset(\"my_dataset\", new()\n {\n DatasetId = \"my_dataset\",\n Description = \"My dataset\",\n Location = \"asia-northeast1\",\n });\n\n var salesforceConfig = new Gcp.BigQuery.DataTransferConfig(\"salesforce_config\", new()\n {\n DisplayName = \"my-salesforce-config\",\n Location = \"asia-northeast1\",\n DataSourceId = \"salesforce\",\n Schedule = \"first sunday of quarter 00:00\",\n DestinationDatasetId = myDataset.DatasetId,\n Params = \n {\n { \"connector.authentication.oauth.clientId\", \"client-id\" },\n { \"connector.authentication.oauth.clientSecret\", \"client-secret\" },\n { \"connector.authentication.oauth.myDomain\", \"MyDomainName\" },\n { \"assets\", \"[\\\"asset-a\\\",\\\"asset-b\\\"]\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmyDataset, err := bigquery.NewDataset(ctx, \"my_dataset\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_dataset\"),\n\t\t\tDescription: pulumi.String(\"My dataset\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigquery.NewDataTransferConfig(ctx, \"salesforce_config\", \u0026bigquery.DataTransferConfigArgs{\n\t\t\tDisplayName: pulumi.String(\"my-salesforce-config\"),\n\t\t\tLocation: pulumi.String(\"asia-northeast1\"),\n\t\t\tDataSourceId: pulumi.String(\"salesforce\"),\n\t\t\tSchedule: pulumi.String(\"first sunday of quarter 00:00\"),\n\t\t\tDestinationDatasetId: 
myDataset.DatasetId,\n\t\t\tParams: pulumi.StringMap{\n\t\t\t\t\"connector.authentication.oauth.clientId\": pulumi.String(\"client-id\"),\n\t\t\t\t\"connector.authentication.oauth.clientSecret\": pulumi.String(\"client-secret\"),\n\t\t\t\t\"connector.authentication.oauth.myDomain\": pulumi.String(\"MyDomainName\"),\n\t\t\t\t\"assets\": pulumi.String(\"[\\\"asset-a\\\",\\\"asset-b\\\"]\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.DataTransferConfig;\nimport com.pulumi.gcp.bigquery.DataTransferConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var myDataset = new Dataset(\"myDataset\", DatasetArgs.builder()\n .datasetId(\"my_dataset\")\n .description(\"My dataset\")\n .location(\"asia-northeast1\")\n .build());\n\n var salesforceConfig = new DataTransferConfig(\"salesforceConfig\", DataTransferConfigArgs.builder()\n .displayName(\"my-salesforce-config\")\n .location(\"asia-northeast1\")\n .dataSourceId(\"salesforce\")\n .schedule(\"first sunday of quarter 00:00\")\n .destinationDatasetId(myDataset.datasetId())\n .params(Map.ofEntries(\n Map.entry(\"connector.authentication.oauth.clientId\", \"client-id\"),\n Map.entry(\"connector.authentication.oauth.clientSecret\", \"client-secret\"),\n Map.entry(\"connector.authentication.oauth.myDomain\", \"MyDomainName\"),\n Map.entry(\"assets\", \"[\\\"asset-a\\\",\\\"asset-b\\\"]\")\n ))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myDataset:\n type: gcp:bigquery:Dataset\n name: my_dataset\n properties:\n datasetId: my_dataset\n description: My dataset\n location: asia-northeast1\n salesforceConfig:\n type: gcp:bigquery:DataTransferConfig\n name: salesforce_config\n properties:\n displayName: my-salesforce-config\n location: asia-northeast1\n dataSourceId: salesforce\n schedule: first sunday of quarter 00:00\n destinationDatasetId: ${myDataset.datasetId}\n params:\n connector.authentication.oauth.clientId: client-id\n connector.authentication.oauth.clientSecret: client-secret\n connector.authentication.oauth.myDomain: MyDomainName\n assets: '[\"asset-a\",\"asset-b\"]'\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nConfig can be imported using any of these accepted formats:\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, Config can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:bigquery/dataTransferConfig:DataTransferConfig default {{name}}\n```\n\n", "properties": { "dataRefreshWindowDays": { "type": "integer", @@ -124746,6 +126235,10 @@ "$ref": "#/types/gcp:bigquery/DataTransferConfigEmailPreferences:DataTransferConfigEmailPreferences", "description": "Email notifications will be sent according to these preferences to the\nemail address of the user who owns this transfer config.\nStructure is documented below.\n" }, + "encryptionConfiguration": { + "$ref": "#/types/gcp:bigquery/DataTransferConfigEncryptionConfiguration:DataTransferConfigEncryptionConfiguration", + "description": "Represents the encryption configuration for a transfer.\nStructure is documented below.\n" + }, "location": { "type": "string", "description": "The geographic location where the transfer config should reside.\nExamples: US, EU, asia-northeast1. The default value is US.\n" @@ -124819,6 +126312,10 @@ "$ref": "#/types/gcp:bigquery/DataTransferConfigEmailPreferences:DataTransferConfigEmailPreferences", "description": "Email notifications will be sent according to these preferences to the\nemail address of the user who owns this transfer config.\nStructure is documented below.\n" }, + "encryptionConfiguration": { + "$ref": "#/types/gcp:bigquery/DataTransferConfigEncryptionConfiguration:DataTransferConfigEncryptionConfiguration", + "description": "Represents the encryption configuration for a transfer.\nStructure is documented below.\n" + }, "location": { "type": "string", "description": "The geographic location where the transfer config should reside.\nExamples: US, EU, asia-northeast1. The default value is US.\n", @@ -124890,6 +126387,10 @@ "$ref": "#/types/gcp:bigquery/DataTransferConfigEmailPreferences:DataTransferConfigEmailPreferences", "description": "Email notifications will be sent according to these preferences to the\nemail address of the user who owns this transfer config.\nStructure is documented below.\n" }, + "encryptionConfiguration": { + "$ref": "#/types/gcp:bigquery/DataTransferConfigEncryptionConfiguration:DataTransferConfigEncryptionConfiguration", + "description": "Represents the encryption configuration for a transfer.\nStructure is documented below.\n" + }, "location": { "type": "string", "description": "The geographic location where the transfer config should reside.\nExamples: US, EU, asia-northeast1. 
The default value is US.\n", @@ -127174,7 +128675,7 @@ } }, "gcp:bigqueryanalyticshub/dataExchange:DataExchange": { - "description": "A Bigquery Analytics Hub data exchange\n\n\nTo get more information about DataExchange, see:\n\n* [API documentation](https://cloud.google.com/bigquery/docs/reference/analytics-hub/rest/v1/projects.locations.dataExchanges)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/bigquery/docs/analytics-hub-introduction)\n\n## Example Usage\n\n### Bigquery Analyticshub Data Exchange Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst dataExchange = new gcp.bigqueryanalyticshub.DataExchange(\"data_exchange\", {\n location: \"US\",\n dataExchangeId: \"my_data_exchange\",\n displayName: \"my_data_exchange\",\n description: \"example data exchange\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndata_exchange = gcp.bigqueryanalyticshub.DataExchange(\"data_exchange\",\n location=\"US\",\n data_exchange_id=\"my_data_exchange\",\n display_name=\"my_data_exchange\",\n description=\"example data exchange\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var dataExchange = new Gcp.BigQueryAnalyticsHub.DataExchange(\"data_exchange\", new()\n {\n Location = \"US\",\n DataExchangeId = \"my_data_exchange\",\n DisplayName = \"my_data_exchange\",\n Description = \"example data exchange\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := bigqueryanalyticshub.NewDataExchange(ctx, \"data_exchange\", \u0026bigqueryanalyticshub.DataExchangeArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: pulumi.String(\"my_data_exchange\"),\n\t\t\tDisplayName: pulumi.String(\"my_data_exchange\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchange;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var dataExchange = new DataExchange(\"dataExchange\", DataExchangeArgs.builder()\n .location(\"US\")\n .dataExchangeId(\"my_data_exchange\")\n .displayName(\"my_data_exchange\")\n .description(\"example data exchange\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n dataExchange:\n type: gcp:bigqueryanalyticshub:DataExchange\n name: data_exchange\n properties:\n location: US\n dataExchangeId: my_data_exchange\n displayName: my_data_exchange\n description: example data exchange\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nDataExchange can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}`\n\n* `{{project}}/{{location}}/{{data_exchange_id}}`\n\n* 
`{{location}}/{{data_exchange_id}}`\n\n* `{{data_exchange_id}}`\n\nWhen using the `pulumi import` command, DataExchange can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/dataExchange:DataExchange default projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/dataExchange:DataExchange default {{project}}/{{location}}/{{data_exchange_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/dataExchange:DataExchange default {{location}}/{{data_exchange_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/dataExchange:DataExchange default {{data_exchange_id}}\n```\n\n", + "description": "A Bigquery Analytics Hub data exchange\n\n\nTo get more information about DataExchange, see:\n\n* [API documentation](https://cloud.google.com/bigquery/docs/reference/analytics-hub/rest/v1/projects.locations.dataExchanges)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/bigquery/docs/analytics-hub-introduction)\n\n## Example Usage\n\n### Bigquery Analyticshub Data Exchange Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst dataExchange = new gcp.bigqueryanalyticshub.DataExchange(\"data_exchange\", {\n location: \"US\",\n dataExchangeId: \"my_data_exchange\",\n displayName: \"my_data_exchange\",\n description: \"example data exchange\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndata_exchange = gcp.bigqueryanalyticshub.DataExchange(\"data_exchange\",\n location=\"US\",\n data_exchange_id=\"my_data_exchange\",\n display_name=\"my_data_exchange\",\n description=\"example data exchange\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var dataExchange = new Gcp.BigQueryAnalyticsHub.DataExchange(\"data_exchange\", new()\n {\n Location = \"US\",\n DataExchangeId = \"my_data_exchange\",\n DisplayName = \"my_data_exchange\",\n Description = \"example data exchange\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := bigqueryanalyticshub.NewDataExchange(ctx, \"data_exchange\", \u0026bigqueryanalyticshub.DataExchangeArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: pulumi.String(\"my_data_exchange\"),\n\t\t\tDisplayName: pulumi.String(\"my_data_exchange\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchange;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var dataExchange = new DataExchange(\"dataExchange\", DataExchangeArgs.builder()\n .location(\"US\")\n .dataExchangeId(\"my_data_exchange\")\n 
.displayName(\"my_data_exchange\")\n .description(\"example data exchange\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n dataExchange:\n type: gcp:bigqueryanalyticshub:DataExchange\n name: data_exchange\n properties:\n location: US\n dataExchangeId: my_data_exchange\n displayName: my_data_exchange\n description: example data exchange\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Bigquery Analyticshub Data Exchange Dcr\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst dataExchange = new gcp.bigqueryanalyticshub.DataExchange(\"data_exchange\", {\n location: \"US\",\n dataExchangeId: \"dcr_data_exchange\",\n displayName: \"dcr_data_exchange\",\n description: \"example dcr data exchange\",\n sharingEnvironmentConfig: {\n dcrExchangeConfig: {},\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndata_exchange = gcp.bigqueryanalyticshub.DataExchange(\"data_exchange\",\n location=\"US\",\n data_exchange_id=\"dcr_data_exchange\",\n display_name=\"dcr_data_exchange\",\n description=\"example dcr data exchange\",\n sharing_environment_config={\n \"dcr_exchange_config\": {},\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var dataExchange = new Gcp.BigQueryAnalyticsHub.DataExchange(\"data_exchange\", new()\n {\n Location = \"US\",\n DataExchangeId = \"dcr_data_exchange\",\n DisplayName = \"dcr_data_exchange\",\n Description = \"example dcr data exchange\",\n SharingEnvironmentConfig = new Gcp.BigQueryAnalyticsHub.Inputs.DataExchangeSharingEnvironmentConfigArgs\n {\n DcrExchangeConfig = null,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := bigqueryanalyticshub.NewDataExchange(ctx, \"data_exchange\", \u0026bigqueryanalyticshub.DataExchangeArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: pulumi.String(\"dcr_data_exchange\"),\n\t\t\tDisplayName: pulumi.String(\"dcr_data_exchange\"),\n\t\t\tDescription: pulumi.String(\"example dcr data exchange\"),\n\t\t\tSharingEnvironmentConfig: \u0026bigqueryanalyticshub.DataExchangeSharingEnvironmentConfigArgs{\n\t\t\t\tDcrExchangeConfig: nil,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchange;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var dataExchange = new DataExchange(\"dataExchange\", DataExchangeArgs.builder()\n .location(\"US\")\n .dataExchangeId(\"dcr_data_exchange\")\n .displayName(\"dcr_data_exchange\")\n .description(\"example dcr data exchange\")\n 
.sharingEnvironmentConfig(DataExchangeSharingEnvironmentConfigArgs.builder()\n .dcrExchangeConfig()\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n dataExchange:\n type: gcp:bigqueryanalyticshub:DataExchange\n name: data_exchange\n properties:\n location: US\n dataExchangeId: dcr_data_exchange\n displayName: dcr_data_exchange\n description: example dcr data exchange\n sharingEnvironmentConfig:\n dcrExchangeConfig: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nDataExchange can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}`\n\n* `{{project}}/{{location}}/{{data_exchange_id}}`\n\n* `{{location}}/{{data_exchange_id}}`\n\n* `{{data_exchange_id}}`\n\nWhen using the `pulumi import` command, DataExchange can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/dataExchange:DataExchange default projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/dataExchange:DataExchange default {{project}}/{{location}}/{{data_exchange_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/dataExchange:DataExchange default {{location}}/{{data_exchange_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/dataExchange:DataExchange default {{data_exchange_id}}\n```\n\n", "properties": { "dataExchangeId": { "type": "string", @@ -127215,6 +128716,10 @@ "project": { "type": "string", "description": "The ID of the project in which the resource belongs.\nIf it is not provided, the provider project is used.\n" + }, + "sharingEnvironmentConfig": { + "$ref": "#/types/gcp:bigqueryanalyticshub/DataExchangeSharingEnvironmentConfig:DataExchangeSharingEnvironmentConfig", + "description": "Configurable data sharing environment option for a data exchange.\nThis field is required for data clean room exchanges.\nStructure is documented below.\n" } }, "required": [ @@ -127223,7 +128728,8 @@ "listingCount", "location", "name", - "project" + "project", + "sharingEnvironmentConfig" ], "inputProperties": { "dataExchangeId": { @@ -127260,6 +128766,11 @@ "type": "string", "description": "The ID of the project in which the resource belongs.\nIf it is not provided, the provider project is used.\n", "willReplaceOnChanges": true + }, + "sharingEnvironmentConfig": { + "$ref": "#/types/gcp:bigqueryanalyticshub/DataExchangeSharingEnvironmentConfig:DataExchangeSharingEnvironmentConfig", + "description": "Configurable data sharing environment option for a data exchange.\nThis field is required for data clean room exchanges.\nStructure is documented below.\n", + "willReplaceOnChanges": true } }, "requiredInputs": [ @@ -127312,6 +128823,11 @@ "type": "string", "description": "The ID of the project in which the resource belongs.\nIf it is not provided, the provider project is used.\n", "willReplaceOnChanges": true + }, + "sharingEnvironmentConfig": { + "$ref": "#/types/gcp:bigqueryanalyticshub/DataExchangeSharingEnvironmentConfig:DataExchangeSharingEnvironmentConfig", + "description": "Configurable data sharing environment option for a data exchange.\nThis field is required for data clean room exchanges.\nStructure is documented below.\n", + "willReplaceOnChanges": true } }, "type": "object" @@ -127641,7 +129157,7 @@ } }, "gcp:bigqueryanalyticshub/listing:Listing": { - "description": "A Bigquery Analytics Hub data exchange listing\n\n\nTo get more information about Listing, see:\n\n* 
[API documentation](https://cloud.google.com/bigquery/docs/reference/analytics-hub/rest/v1/projects.locations.dataExchanges.listings)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/bigquery/docs/analytics-hub-introduction)\n\n## Example Usage\n\n### Bigquery Analyticshub Listing Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst listing = new gcp.bigqueryanalyticshub.DataExchange(\"listing\", {\n location: \"US\",\n dataExchangeId: \"my_data_exchange\",\n displayName: \"my_data_exchange\",\n description: \"example data exchange\",\n});\nconst listingDataset = new gcp.bigquery.Dataset(\"listing\", {\n datasetId: \"my_listing\",\n friendlyName: \"my_listing\",\n description: \"example data exchange\",\n location: \"US\",\n});\nconst listingListing = new gcp.bigqueryanalyticshub.Listing(\"listing\", {\n location: \"US\",\n dataExchangeId: listing.dataExchangeId,\n listingId: \"my_listing\",\n displayName: \"my_listing\",\n description: \"example data exchange\",\n bigqueryDataset: {\n dataset: listingDataset.id,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nlisting = gcp.bigqueryanalyticshub.DataExchange(\"listing\",\n location=\"US\",\n data_exchange_id=\"my_data_exchange\",\n display_name=\"my_data_exchange\",\n description=\"example data exchange\")\nlisting_dataset = gcp.bigquery.Dataset(\"listing\",\n dataset_id=\"my_listing\",\n friendly_name=\"my_listing\",\n description=\"example data exchange\",\n location=\"US\")\nlisting_listing = gcp.bigqueryanalyticshub.Listing(\"listing\",\n location=\"US\",\n data_exchange_id=listing.data_exchange_id,\n listing_id=\"my_listing\",\n display_name=\"my_listing\",\n description=\"example data exchange\",\n bigquery_dataset={\n \"dataset\": listing_dataset.id,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var listing = new Gcp.BigQueryAnalyticsHub.DataExchange(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = \"my_data_exchange\",\n DisplayName = \"my_data_exchange\",\n Description = \"example data exchange\",\n });\n\n var listingDataset = new Gcp.BigQuery.Dataset(\"listing\", new()\n {\n DatasetId = \"my_listing\",\n FriendlyName = \"my_listing\",\n Description = \"example data exchange\",\n Location = \"US\",\n });\n\n var listingListing = new Gcp.BigQueryAnalyticsHub.Listing(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = listing.DataExchangeId,\n ListingId = \"my_listing\",\n DisplayName = \"my_listing\",\n Description = \"example data exchange\",\n BigqueryDataset = new Gcp.BigQueryAnalyticsHub.Inputs.ListingBigqueryDatasetArgs\n {\n Dataset = listingDataset.Id,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tlisting, err := bigqueryanalyticshub.NewDataExchange(ctx, \"listing\", \u0026bigqueryanalyticshub.DataExchangeArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: pulumi.String(\"my_data_exchange\"),\n\t\t\tDisplayName: pulumi.String(\"my_data_exchange\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tlistingDataset, err := bigquery.NewDataset(ctx, \"listing\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_listing\"),\n\t\t\tFriendlyName: pulumi.String(\"my_listing\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigqueryanalyticshub.NewListing(ctx, \"listing\", \u0026bigqueryanalyticshub.ListingArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: listing.DataExchangeId,\n\t\t\tListingId: pulumi.String(\"my_listing\"),\n\t\t\tDisplayName: pulumi.String(\"my_listing\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t\tBigqueryDataset: \u0026bigqueryanalyticshub.ListingBigqueryDatasetArgs{\n\t\t\t\tDataset: listingDataset.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchange;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.Listing;\nimport com.pulumi.gcp.bigqueryanalyticshub.ListingArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingBigqueryDatasetArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var listing = new DataExchange(\"listing\", DataExchangeArgs.builder()\n .location(\"US\")\n .dataExchangeId(\"my_data_exchange\")\n .displayName(\"my_data_exchange\")\n .description(\"example data exchange\")\n .build());\n\n var listingDataset = new Dataset(\"listingDataset\", DatasetArgs.builder()\n .datasetId(\"my_listing\")\n .friendlyName(\"my_listing\")\n .description(\"example data exchange\")\n .location(\"US\")\n .build());\n\n var listingListing = new Listing(\"listingListing\", ListingArgs.builder()\n .location(\"US\")\n .dataExchangeId(listing.dataExchangeId())\n .listingId(\"my_listing\")\n .displayName(\"my_listing\")\n .description(\"example data exchange\")\n .bigqueryDataset(ListingBigqueryDatasetArgs.builder()\n .dataset(listingDataset.id())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n listing:\n type: gcp:bigqueryanalyticshub:DataExchange\n properties:\n location: US\n dataExchangeId: my_data_exchange\n displayName: my_data_exchange\n description: example data exchange\n listingListing:\n type: gcp:bigqueryanalyticshub:Listing\n name: listing\n properties:\n location: US\n dataExchangeId: ${listing.dataExchangeId}\n listingId: my_listing\n displayName: my_listing\n description: example data exchange\n bigqueryDataset:\n dataset: ${listingDataset.id}\n listingDataset:\n type: gcp:bigquery:Dataset\n name: listing\n properties:\n datasetId: my_listing\n friendlyName: my_listing\n description: example data exchange\n location: US\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Bigquery Analyticshub Listing Restricted\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst listing = new gcp.bigqueryanalyticshub.DataExchange(\"listing\", {\n 
location: \"US\",\n dataExchangeId: \"my_data_exchange\",\n displayName: \"my_data_exchange\",\n description: \"example data exchange\",\n});\nconst listingDataset = new gcp.bigquery.Dataset(\"listing\", {\n datasetId: \"my_listing\",\n friendlyName: \"my_listing\",\n description: \"example data exchange\",\n location: \"US\",\n});\nconst listingListing = new gcp.bigqueryanalyticshub.Listing(\"listing\", {\n location: \"US\",\n dataExchangeId: listing.dataExchangeId,\n listingId: \"my_listing\",\n displayName: \"my_listing\",\n description: \"example data exchange\",\n bigqueryDataset: {\n dataset: listingDataset.id,\n },\n restrictedExportConfig: {\n enabled: true,\n restrictQueryResult: true,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nlisting = gcp.bigqueryanalyticshub.DataExchange(\"listing\",\n location=\"US\",\n data_exchange_id=\"my_data_exchange\",\n display_name=\"my_data_exchange\",\n description=\"example data exchange\")\nlisting_dataset = gcp.bigquery.Dataset(\"listing\",\n dataset_id=\"my_listing\",\n friendly_name=\"my_listing\",\n description=\"example data exchange\",\n location=\"US\")\nlisting_listing = gcp.bigqueryanalyticshub.Listing(\"listing\",\n location=\"US\",\n data_exchange_id=listing.data_exchange_id,\n listing_id=\"my_listing\",\n display_name=\"my_listing\",\n description=\"example data exchange\",\n bigquery_dataset={\n \"dataset\": listing_dataset.id,\n },\n restricted_export_config={\n \"enabled\": True,\n \"restrict_query_result\": True,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var listing = new Gcp.BigQueryAnalyticsHub.DataExchange(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = \"my_data_exchange\",\n DisplayName = \"my_data_exchange\",\n Description = \"example data exchange\",\n });\n\n var listingDataset = new Gcp.BigQuery.Dataset(\"listing\", new()\n {\n DatasetId = \"my_listing\",\n FriendlyName = \"my_listing\",\n Description = \"example data exchange\",\n Location = \"US\",\n });\n\n var listingListing = new Gcp.BigQueryAnalyticsHub.Listing(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = listing.DataExchangeId,\n ListingId = \"my_listing\",\n DisplayName = \"my_listing\",\n Description = \"example data exchange\",\n BigqueryDataset = new Gcp.BigQueryAnalyticsHub.Inputs.ListingBigqueryDatasetArgs\n {\n Dataset = listingDataset.Id,\n },\n RestrictedExportConfig = new Gcp.BigQueryAnalyticsHub.Inputs.ListingRestrictedExportConfigArgs\n {\n Enabled = true,\n RestrictQueryResult = true,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tlisting, err := bigqueryanalyticshub.NewDataExchange(ctx, \"listing\", \u0026bigqueryanalyticshub.DataExchangeArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: pulumi.String(\"my_data_exchange\"),\n\t\t\tDisplayName: pulumi.String(\"my_data_exchange\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlistingDataset, err := bigquery.NewDataset(ctx, \"listing\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_listing\"),\n\t\t\tFriendlyName: 
pulumi.String(\"my_listing\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigqueryanalyticshub.NewListing(ctx, \"listing\", \u0026bigqueryanalyticshub.ListingArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: listing.DataExchangeId,\n\t\t\tListingId: pulumi.String(\"my_listing\"),\n\t\t\tDisplayName: pulumi.String(\"my_listing\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t\tBigqueryDataset: \u0026bigqueryanalyticshub.ListingBigqueryDatasetArgs{\n\t\t\t\tDataset: listingDataset.ID(),\n\t\t\t},\n\t\t\tRestrictedExportConfig: \u0026bigqueryanalyticshub.ListingRestrictedExportConfigArgs{\n\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\tRestrictQueryResult: pulumi.Bool(true),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchange;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.Listing;\nimport com.pulumi.gcp.bigqueryanalyticshub.ListingArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingBigqueryDatasetArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingRestrictedExportConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var listing = new DataExchange(\"listing\", DataExchangeArgs.builder()\n .location(\"US\")\n .dataExchangeId(\"my_data_exchange\")\n .displayName(\"my_data_exchange\")\n .description(\"example data exchange\")\n .build());\n\n var listingDataset = new Dataset(\"listingDataset\", DatasetArgs.builder()\n .datasetId(\"my_listing\")\n .friendlyName(\"my_listing\")\n .description(\"example data exchange\")\n .location(\"US\")\n .build());\n\n var listingListing = new Listing(\"listingListing\", ListingArgs.builder()\n .location(\"US\")\n .dataExchangeId(listing.dataExchangeId())\n .listingId(\"my_listing\")\n .displayName(\"my_listing\")\n .description(\"example data exchange\")\n .bigqueryDataset(ListingBigqueryDatasetArgs.builder()\n .dataset(listingDataset.id())\n .build())\n .restrictedExportConfig(ListingRestrictedExportConfigArgs.builder()\n .enabled(true)\n .restrictQueryResult(true)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n listing:\n type: gcp:bigqueryanalyticshub:DataExchange\n properties:\n location: US\n dataExchangeId: my_data_exchange\n displayName: my_data_exchange\n description: example data exchange\n listingListing:\n type: gcp:bigqueryanalyticshub:Listing\n name: listing\n properties:\n location: US\n dataExchangeId: ${listing.dataExchangeId}\n listingId: my_listing\n displayName: my_listing\n description: example data exchange\n bigqueryDataset:\n dataset: ${listingDataset.id}\n restrictedExportConfig:\n enabled: true\n restrictQueryResult: true\n listingDataset:\n type: gcp:bigquery:Dataset\n name: listing\n properties:\n datasetId: my_listing\n friendlyName: my_listing\n description: example data exchange\n location: US\n```\n\u003c!--End 
PulumiCodeChooser --\u003e\n\n## Import\n\nListing can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}`\n\n* `{{project}}/{{location}}/{{data_exchange_id}}/{{listing_id}}`\n\n* `{{location}}/{{data_exchange_id}}/{{listing_id}}`\n\nWhen using the `pulumi import` command, Listing can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/listing:Listing default projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/listing:Listing default {{project}}/{{location}}/{{data_exchange_id}}/{{listing_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/listing:Listing default {{location}}/{{data_exchange_id}}/{{listing_id}}\n```\n\n", + "description": "A Bigquery Analytics Hub data exchange listing\n\n\nTo get more information about Listing, see:\n\n* [API documentation](https://cloud.google.com/bigquery/docs/reference/analytics-hub/rest/v1/projects.locations.dataExchanges.listings)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/bigquery/docs/analytics-hub-introduction)\n\n## Example Usage\n\n### Bigquery Analyticshub Listing Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst listing = new gcp.bigqueryanalyticshub.DataExchange(\"listing\", {\n location: \"US\",\n dataExchangeId: \"my_data_exchange\",\n displayName: \"my_data_exchange\",\n description: \"example data exchange\",\n});\nconst listingDataset = new gcp.bigquery.Dataset(\"listing\", {\n datasetId: \"my_listing\",\n friendlyName: \"my_listing\",\n description: \"example data exchange\",\n location: \"US\",\n});\nconst listingListing = new gcp.bigqueryanalyticshub.Listing(\"listing\", {\n location: \"US\",\n dataExchangeId: listing.dataExchangeId,\n listingId: \"my_listing\",\n displayName: \"my_listing\",\n description: \"example data exchange\",\n bigqueryDataset: {\n dataset: listingDataset.id,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nlisting = gcp.bigqueryanalyticshub.DataExchange(\"listing\",\n location=\"US\",\n data_exchange_id=\"my_data_exchange\",\n display_name=\"my_data_exchange\",\n description=\"example data exchange\")\nlisting_dataset = gcp.bigquery.Dataset(\"listing\",\n dataset_id=\"my_listing\",\n friendly_name=\"my_listing\",\n description=\"example data exchange\",\n location=\"US\")\nlisting_listing = gcp.bigqueryanalyticshub.Listing(\"listing\",\n location=\"US\",\n data_exchange_id=listing.data_exchange_id,\n listing_id=\"my_listing\",\n display_name=\"my_listing\",\n description=\"example data exchange\",\n bigquery_dataset={\n \"dataset\": listing_dataset.id,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var listing = new Gcp.BigQueryAnalyticsHub.DataExchange(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = \"my_data_exchange\",\n DisplayName = \"my_data_exchange\",\n Description = \"example data exchange\",\n });\n\n var listingDataset = new Gcp.BigQuery.Dataset(\"listing\", new()\n {\n DatasetId = \"my_listing\",\n FriendlyName = \"my_listing\",\n Description = \"example data exchange\",\n Location = \"US\",\n });\n\n var listingListing = new 
Gcp.BigQueryAnalyticsHub.Listing(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = listing.DataExchangeId,\n ListingId = \"my_listing\",\n DisplayName = \"my_listing\",\n Description = \"example data exchange\",\n BigqueryDataset = new Gcp.BigQueryAnalyticsHub.Inputs.ListingBigqueryDatasetArgs\n {\n Dataset = listingDataset.Id,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tlisting, err := bigqueryanalyticshub.NewDataExchange(ctx, \"listing\", \u0026bigqueryanalyticshub.DataExchangeArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: pulumi.String(\"my_data_exchange\"),\n\t\t\tDisplayName: pulumi.String(\"my_data_exchange\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlistingDataset, err := bigquery.NewDataset(ctx, \"listing\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_listing\"),\n\t\t\tFriendlyName: pulumi.String(\"my_listing\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigqueryanalyticshub.NewListing(ctx, \"listing\", \u0026bigqueryanalyticshub.ListingArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: listing.DataExchangeId,\n\t\t\tListingId: pulumi.String(\"my_listing\"),\n\t\t\tDisplayName: pulumi.String(\"my_listing\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t\tBigqueryDataset: \u0026bigqueryanalyticshub.ListingBigqueryDatasetArgs{\n\t\t\t\tDataset: listingDataset.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchange;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.Listing;\nimport com.pulumi.gcp.bigqueryanalyticshub.ListingArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingBigqueryDatasetArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var listing = new DataExchange(\"listing\", DataExchangeArgs.builder()\n .location(\"US\")\n .dataExchangeId(\"my_data_exchange\")\n .displayName(\"my_data_exchange\")\n .description(\"example data exchange\")\n .build());\n\n var listingDataset = new Dataset(\"listingDataset\", DatasetArgs.builder()\n .datasetId(\"my_listing\")\n .friendlyName(\"my_listing\")\n .description(\"example data exchange\")\n .location(\"US\")\n .build());\n\n var listingListing = new Listing(\"listingListing\", ListingArgs.builder()\n .location(\"US\")\n .dataExchangeId(listing.dataExchangeId())\n .listingId(\"my_listing\")\n .displayName(\"my_listing\")\n .description(\"example data exchange\")\n .bigqueryDataset(ListingBigqueryDatasetArgs.builder()\n .dataset(listingDataset.id())\n 
.build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n listing:\n type: gcp:bigqueryanalyticshub:DataExchange\n properties:\n location: US\n dataExchangeId: my_data_exchange\n displayName: my_data_exchange\n description: example data exchange\n listingListing:\n type: gcp:bigqueryanalyticshub:Listing\n name: listing\n properties:\n location: US\n dataExchangeId: ${listing.dataExchangeId}\n listingId: my_listing\n displayName: my_listing\n description: example data exchange\n bigqueryDataset:\n dataset: ${listingDataset.id}\n listingDataset:\n type: gcp:bigquery:Dataset\n name: listing\n properties:\n datasetId: my_listing\n friendlyName: my_listing\n description: example data exchange\n location: US\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Bigquery Analyticshub Listing Restricted\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst listing = new gcp.bigqueryanalyticshub.DataExchange(\"listing\", {\n location: \"US\",\n dataExchangeId: \"my_data_exchange\",\n displayName: \"my_data_exchange\",\n description: \"example data exchange\",\n});\nconst listingDataset = new gcp.bigquery.Dataset(\"listing\", {\n datasetId: \"my_listing\",\n friendlyName: \"my_listing\",\n description: \"example data exchange\",\n location: \"US\",\n});\nconst listingListing = new gcp.bigqueryanalyticshub.Listing(\"listing\", {\n location: \"US\",\n dataExchangeId: listing.dataExchangeId,\n listingId: \"my_listing\",\n displayName: \"my_listing\",\n description: \"example data exchange\",\n bigqueryDataset: {\n dataset: listingDataset.id,\n },\n restrictedExportConfig: {\n enabled: true,\n restrictQueryResult: true,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nlisting = gcp.bigqueryanalyticshub.DataExchange(\"listing\",\n location=\"US\",\n data_exchange_id=\"my_data_exchange\",\n display_name=\"my_data_exchange\",\n description=\"example data exchange\")\nlisting_dataset = gcp.bigquery.Dataset(\"listing\",\n dataset_id=\"my_listing\",\n friendly_name=\"my_listing\",\n description=\"example data exchange\",\n location=\"US\")\nlisting_listing = gcp.bigqueryanalyticshub.Listing(\"listing\",\n location=\"US\",\n data_exchange_id=listing.data_exchange_id,\n listing_id=\"my_listing\",\n display_name=\"my_listing\",\n description=\"example data exchange\",\n bigquery_dataset={\n \"dataset\": listing_dataset.id,\n },\n restricted_export_config={\n \"enabled\": True,\n \"restrict_query_result\": True,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var listing = new Gcp.BigQueryAnalyticsHub.DataExchange(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = \"my_data_exchange\",\n DisplayName = \"my_data_exchange\",\n Description = \"example data exchange\",\n });\n\n var listingDataset = new Gcp.BigQuery.Dataset(\"listing\", new()\n {\n DatasetId = \"my_listing\",\n FriendlyName = \"my_listing\",\n Description = \"example data exchange\",\n Location = \"US\",\n });\n\n var listingListing = new Gcp.BigQueryAnalyticsHub.Listing(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = listing.DataExchangeId,\n ListingId = \"my_listing\",\n DisplayName = \"my_listing\",\n Description = \"example data exchange\",\n BigqueryDataset = new Gcp.BigQueryAnalyticsHub.Inputs.ListingBigqueryDatasetArgs\n {\n Dataset = listingDataset.Id,\n },\n 
RestrictedExportConfig = new Gcp.BigQueryAnalyticsHub.Inputs.ListingRestrictedExportConfigArgs\n {\n Enabled = true,\n RestrictQueryResult = true,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tlisting, err := bigqueryanalyticshub.NewDataExchange(ctx, \"listing\", \u0026bigqueryanalyticshub.DataExchangeArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: pulumi.String(\"my_data_exchange\"),\n\t\t\tDisplayName: pulumi.String(\"my_data_exchange\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlistingDataset, err := bigquery.NewDataset(ctx, \"listing\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_listing\"),\n\t\t\tFriendlyName: pulumi.String(\"my_listing\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigqueryanalyticshub.NewListing(ctx, \"listing\", \u0026bigqueryanalyticshub.ListingArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: listing.DataExchangeId,\n\t\t\tListingId: pulumi.String(\"my_listing\"),\n\t\t\tDisplayName: pulumi.String(\"my_listing\"),\n\t\t\tDescription: pulumi.String(\"example data exchange\"),\n\t\t\tBigqueryDataset: \u0026bigqueryanalyticshub.ListingBigqueryDatasetArgs{\n\t\t\t\tDataset: listingDataset.ID(),\n\t\t\t},\n\t\t\tRestrictedExportConfig: \u0026bigqueryanalyticshub.ListingRestrictedExportConfigArgs{\n\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\tRestrictQueryResult: pulumi.Bool(true),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchange;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.Listing;\nimport com.pulumi.gcp.bigqueryanalyticshub.ListingArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingBigqueryDatasetArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingRestrictedExportConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var listing = new DataExchange(\"listing\", DataExchangeArgs.builder()\n .location(\"US\")\n .dataExchangeId(\"my_data_exchange\")\n .displayName(\"my_data_exchange\")\n .description(\"example data exchange\")\n .build());\n\n var listingDataset = new Dataset(\"listingDataset\", DatasetArgs.builder()\n .datasetId(\"my_listing\")\n .friendlyName(\"my_listing\")\n .description(\"example data exchange\")\n .location(\"US\")\n .build());\n\n var listingListing = new Listing(\"listingListing\", ListingArgs.builder()\n .location(\"US\")\n .dataExchangeId(listing.dataExchangeId())\n .listingId(\"my_listing\")\n .displayName(\"my_listing\")\n .description(\"example data exchange\")\n 
.bigqueryDataset(ListingBigqueryDatasetArgs.builder()\n .dataset(listingDataset.id())\n .build())\n .restrictedExportConfig(ListingRestrictedExportConfigArgs.builder()\n .enabled(true)\n .restrictQueryResult(true)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n listing:\n type: gcp:bigqueryanalyticshub:DataExchange\n properties:\n location: US\n dataExchangeId: my_data_exchange\n displayName: my_data_exchange\n description: example data exchange\n listingListing:\n type: gcp:bigqueryanalyticshub:Listing\n name: listing\n properties:\n location: US\n dataExchangeId: ${listing.dataExchangeId}\n listingId: my_listing\n displayName: my_listing\n description: example data exchange\n bigqueryDataset:\n dataset: ${listingDataset.id}\n restrictedExportConfig:\n enabled: true\n restrictQueryResult: true\n listingDataset:\n type: gcp:bigquery:Dataset\n name: listing\n properties:\n datasetId: my_listing\n friendlyName: my_listing\n description: example data exchange\n location: US\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Bigquery Analyticshub Listing Dcr\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst listing = new gcp.bigqueryanalyticshub.DataExchange(\"listing\", {\n location: \"US\",\n dataExchangeId: \"dcr_data_exchange\",\n displayName: \"dcr_data_exchange\",\n description: \"example dcr data exchange\",\n sharingEnvironmentConfig: {\n dcrExchangeConfig: {},\n },\n});\nconst listingDataset = new gcp.bigquery.Dataset(\"listing\", {\n datasetId: \"dcr_listing\",\n friendlyName: \"dcr_listing\",\n description: \"example dcr data exchange\",\n location: \"US\",\n});\nconst listingTable = new gcp.bigquery.Table(\"listing\", {\n deletionProtection: false,\n tableId: \"dcr_listing\",\n datasetId: listingDataset.datasetId,\n schema: `[\n {\n \"name\": \"name\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"post_abbr\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"date\",\n \"type\": \"DATE\",\n \"mode\": \"NULLABLE\"\n }\n]\n`,\n});\nconst listingListing = new gcp.bigqueryanalyticshub.Listing(\"listing\", {\n location: \"US\",\n dataExchangeId: listing.dataExchangeId,\n listingId: \"dcr_listing\",\n displayName: \"dcr_listing\",\n description: \"example dcr data exchange\",\n bigqueryDataset: {\n dataset: listingDataset.id,\n selectedResources: [{\n table: listingTable.id,\n }],\n },\n restrictedExportConfig: {\n enabled: true,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nlisting = gcp.bigqueryanalyticshub.DataExchange(\"listing\",\n location=\"US\",\n data_exchange_id=\"dcr_data_exchange\",\n display_name=\"dcr_data_exchange\",\n description=\"example dcr data exchange\",\n sharing_environment_config={\n \"dcr_exchange_config\": {},\n })\nlisting_dataset = gcp.bigquery.Dataset(\"listing\",\n dataset_id=\"dcr_listing\",\n friendly_name=\"dcr_listing\",\n description=\"example dcr data exchange\",\n location=\"US\")\nlisting_table = gcp.bigquery.Table(\"listing\",\n deletion_protection=False,\n table_id=\"dcr_listing\",\n dataset_id=listing_dataset.dataset_id,\n schema=\"\"\"[\n {\n \"name\": \"name\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"post_abbr\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"date\",\n \"type\": \"DATE\",\n \"mode\": \"NULLABLE\"\n }\n]\n\"\"\")\nlisting_listing = gcp.bigqueryanalyticshub.Listing(\"listing\",\n 
location=\"US\",\n data_exchange_id=listing.data_exchange_id,\n listing_id=\"dcr_listing\",\n display_name=\"dcr_listing\",\n description=\"example dcr data exchange\",\n bigquery_dataset={\n \"dataset\": listing_dataset.id,\n \"selected_resources\": [{\n \"table\": listing_table.id,\n }],\n },\n restricted_export_config={\n \"enabled\": True,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var listing = new Gcp.BigQueryAnalyticsHub.DataExchange(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = \"dcr_data_exchange\",\n DisplayName = \"dcr_data_exchange\",\n Description = \"example dcr data exchange\",\n SharingEnvironmentConfig = new Gcp.BigQueryAnalyticsHub.Inputs.DataExchangeSharingEnvironmentConfigArgs\n {\n DcrExchangeConfig = null,\n },\n });\n\n var listingDataset = new Gcp.BigQuery.Dataset(\"listing\", new()\n {\n DatasetId = \"dcr_listing\",\n FriendlyName = \"dcr_listing\",\n Description = \"example dcr data exchange\",\n Location = \"US\",\n });\n\n var listingTable = new Gcp.BigQuery.Table(\"listing\", new()\n {\n DeletionProtection = false,\n TableId = \"dcr_listing\",\n DatasetId = listingDataset.DatasetId,\n Schema = @\"[\n {\n \"\"name\"\": \"\"name\"\",\n \"\"type\"\": \"\"STRING\"\",\n \"\"mode\"\": \"\"NULLABLE\"\"\n },\n {\n \"\"name\"\": \"\"post_abbr\"\",\n \"\"type\"\": \"\"STRING\"\",\n \"\"mode\"\": \"\"NULLABLE\"\"\n },\n {\n \"\"name\"\": \"\"date\"\",\n \"\"type\"\": \"\"DATE\"\",\n \"\"mode\"\": \"\"NULLABLE\"\"\n }\n]\n\",\n });\n\n var listingListing = new Gcp.BigQueryAnalyticsHub.Listing(\"listing\", new()\n {\n Location = \"US\",\n DataExchangeId = listing.DataExchangeId,\n ListingId = \"dcr_listing\",\n DisplayName = \"dcr_listing\",\n Description = \"example dcr data exchange\",\n BigqueryDataset = new Gcp.BigQueryAnalyticsHub.Inputs.ListingBigqueryDatasetArgs\n {\n Dataset = listingDataset.Id,\n SelectedResources = new[]\n {\n new Gcp.BigQueryAnalyticsHub.Inputs.ListingBigqueryDatasetSelectedResourceArgs\n {\n Table = listingTable.Id,\n },\n },\n },\n RestrictedExportConfig = new Gcp.BigQueryAnalyticsHub.Inputs.ListingRestrictedExportConfigArgs\n {\n Enabled = true,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tlisting, err := bigqueryanalyticshub.NewDataExchange(ctx, \"listing\", \u0026bigqueryanalyticshub.DataExchangeArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: pulumi.String(\"dcr_data_exchange\"),\n\t\t\tDisplayName: pulumi.String(\"dcr_data_exchange\"),\n\t\t\tDescription: pulumi.String(\"example dcr data exchange\"),\n\t\t\tSharingEnvironmentConfig: \u0026bigqueryanalyticshub.DataExchangeSharingEnvironmentConfigArgs{\n\t\t\t\tDcrExchangeConfig: nil,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlistingDataset, err := bigquery.NewDataset(ctx, \"listing\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"dcr_listing\"),\n\t\t\tFriendlyName: pulumi.String(\"dcr_listing\"),\n\t\t\tDescription: pulumi.String(\"example dcr data exchange\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlistingTable, err := bigquery.NewTable(ctx, \"listing\", 
\u0026bigquery.TableArgs{\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTableId: pulumi.String(\"dcr_listing\"),\n\t\t\tDatasetId: listingDataset.DatasetId,\n\t\t\tSchema: pulumi.String(`[\n {\n \"name\": \"name\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"post_abbr\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"date\",\n \"type\": \"DATE\",\n \"mode\": \"NULLABLE\"\n }\n]\n`),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigqueryanalyticshub.NewListing(ctx, \"listing\", \u0026bigqueryanalyticshub.ListingArgs{\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDataExchangeId: listing.DataExchangeId,\n\t\t\tListingId: pulumi.String(\"dcr_listing\"),\n\t\t\tDisplayName: pulumi.String(\"dcr_listing\"),\n\t\t\tDescription: pulumi.String(\"example dcr data exchange\"),\n\t\t\tBigqueryDataset: \u0026bigqueryanalyticshub.ListingBigqueryDatasetArgs{\n\t\t\t\tDataset: listingDataset.ID(),\n\t\t\t\tSelectedResources: bigqueryanalyticshub.ListingBigqueryDatasetSelectedResourceArray{\n\t\t\t\t\t\u0026bigqueryanalyticshub.ListingBigqueryDatasetSelectedResourceArgs{\n\t\t\t\t\t\tTable: listingTable.ID(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRestrictedExportConfig: \u0026bigqueryanalyticshub.ListingRestrictedExportConfigArgs{\n\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchange;\nimport com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.Table;\nimport com.pulumi.gcp.bigquery.TableArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.Listing;\nimport com.pulumi.gcp.bigqueryanalyticshub.ListingArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingBigqueryDatasetArgs;\nimport com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingRestrictedExportConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var listing = new DataExchange(\"listing\", DataExchangeArgs.builder()\n .location(\"US\")\n .dataExchangeId(\"dcr_data_exchange\")\n .displayName(\"dcr_data_exchange\")\n .description(\"example dcr data exchange\")\n .sharingEnvironmentConfig(DataExchangeSharingEnvironmentConfigArgs.builder()\n .dcrExchangeConfig()\n .build())\n .build());\n\n var listingDataset = new Dataset(\"listingDataset\", DatasetArgs.builder()\n .datasetId(\"dcr_listing\")\n .friendlyName(\"dcr_listing\")\n .description(\"example dcr data exchange\")\n .location(\"US\")\n .build());\n\n var listingTable = new Table(\"listingTable\", TableArgs.builder()\n .deletionProtection(false)\n .tableId(\"dcr_listing\")\n .datasetId(listingDataset.datasetId())\n .schema(\"\"\"\n[\n {\n \"name\": \"name\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"post_abbr\",\n \"type\": \"STRING\",\n \"mode\": 
\"NULLABLE\"\n },\n {\n \"name\": \"date\",\n \"type\": \"DATE\",\n \"mode\": \"NULLABLE\"\n }\n]\n \"\"\")\n .build());\n\n var listingListing = new Listing(\"listingListing\", ListingArgs.builder()\n .location(\"US\")\n .dataExchangeId(listing.dataExchangeId())\n .listingId(\"dcr_listing\")\n .displayName(\"dcr_listing\")\n .description(\"example dcr data exchange\")\n .bigqueryDataset(ListingBigqueryDatasetArgs.builder()\n .dataset(listingDataset.id())\n .selectedResources(ListingBigqueryDatasetSelectedResourceArgs.builder()\n .table(listingTable.id())\n .build())\n .build())\n .restrictedExportConfig(ListingRestrictedExportConfigArgs.builder()\n .enabled(true)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n listing:\n type: gcp:bigqueryanalyticshub:DataExchange\n properties:\n location: US\n dataExchangeId: dcr_data_exchange\n displayName: dcr_data_exchange\n description: example dcr data exchange\n sharingEnvironmentConfig:\n dcrExchangeConfig: {}\n listingListing:\n type: gcp:bigqueryanalyticshub:Listing\n name: listing\n properties:\n location: US\n dataExchangeId: ${listing.dataExchangeId}\n listingId: dcr_listing\n displayName: dcr_listing\n description: example dcr data exchange\n bigqueryDataset:\n dataset: ${listingDataset.id}\n selectedResources:\n - table: ${listingTable.id}\n restrictedExportConfig:\n enabled: true\n listingDataset:\n type: gcp:bigquery:Dataset\n name: listing\n properties:\n datasetId: dcr_listing\n friendlyName: dcr_listing\n description: example dcr data exchange\n location: US\n listingTable:\n type: gcp:bigquery:Table\n name: listing\n properties:\n deletionProtection: false\n tableId: dcr_listing\n datasetId: ${listingDataset.datasetId}\n schema: |\n [\n {\n \"name\": \"name\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"post_abbr\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\"\n },\n {\n \"name\": \"date\",\n \"type\": \"DATE\",\n \"mode\": \"NULLABLE\"\n }\n ]\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nListing can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}`\n\n* `{{project}}/{{location}}/{{data_exchange_id}}/{{listing_id}}`\n\n* `{{location}}/{{data_exchange_id}}/{{listing_id}}`\n\nWhen using the `pulumi import` command, Listing can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/listing:Listing default projects/{{project}}/locations/{{location}}/dataExchanges/{{data_exchange_id}}/listings/{{listing_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/listing:Listing default {{project}}/{{location}}/{{data_exchange_id}}/{{listing_id}}\n```\n\n```sh\n$ pulumi import gcp:bigqueryanalyticshub/listing:Listing default {{location}}/{{data_exchange_id}}/{{listing_id}}\n```\n\n", "properties": { "bigqueryDataset": { "$ref": "#/types/gcp:bigqueryanalyticshub/ListingBigqueryDataset:ListingBigqueryDataset", @@ -127722,7 +129238,8 @@ "inputProperties": { "bigqueryDataset": { "$ref": "#/types/gcp:bigqueryanalyticshub/ListingBigqueryDataset:ListingBigqueryDataset", - "description": "Shared dataset i.e. BigQuery dataset source.\nStructure is documented below.\n" + "description": "Shared dataset i.e. 
BigQuery dataset source.\nStructure is documented below.\n", + "willReplaceOnChanges": true }, "categories": { "type": "array", @@ -127799,7 +129316,8 @@ "properties": { "bigqueryDataset": { "$ref": "#/types/gcp:bigqueryanalyticshub/ListingBigqueryDataset:ListingBigqueryDataset", - "description": "Shared dataset i.e. BigQuery dataset source.\nStructure is documented below.\n" + "description": "Shared dataset i.e. BigQuery dataset source.\nStructure is documented below.\n", + "willReplaceOnChanges": true }, "categories": { "type": "array", @@ -129390,7 +130908,7 @@ } }, "gcp:bigtable/table:Table": { - "description": "Creates a Google Cloud Bigtable table inside an instance. For more information see\n[the official documentation](https://cloud.google.com/bigtable/) and\n[API](https://cloud.google.com/bigtable/docs/go/reference).\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst instance = new gcp.bigtable.Instance(\"instance\", {\n name: \"tf-instance\",\n clusters: [{\n clusterId: \"tf-instance-cluster\",\n zone: \"us-central1-b\",\n numNodes: 3,\n storageType: \"HDD\",\n }],\n});\nconst table = new gcp.bigtable.Table(\"table\", {\n name: \"tf-table\",\n instanceName: instance.name,\n splitKeys: [\n \"a\",\n \"b\",\n \"c\",\n ],\n columnFamilies: [\n {\n family: \"family-first\",\n },\n {\n family: \"family-second\",\n },\n ],\n changeStreamRetention: \"24h0m0s\",\n automatedBackupPolicy: {\n retentionPeriod: \"72h0m0s\",\n frequency: \"24h0m0s\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ninstance = gcp.bigtable.Instance(\"instance\",\n name=\"tf-instance\",\n clusters=[{\n \"cluster_id\": \"tf-instance-cluster\",\n \"zone\": \"us-central1-b\",\n \"num_nodes\": 3,\n \"storage_type\": \"HDD\",\n }])\ntable = gcp.bigtable.Table(\"table\",\n name=\"tf-table\",\n instance_name=instance.name,\n split_keys=[\n \"a\",\n \"b\",\n \"c\",\n ],\n column_families=[\n {\n \"family\": \"family-first\",\n },\n {\n \"family\": \"family-second\",\n },\n ],\n change_stream_retention=\"24h0m0s\",\n automated_backup_policy={\n \"retention_period\": \"72h0m0s\",\n \"frequency\": \"24h0m0s\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var instance = new Gcp.BigTable.Instance(\"instance\", new()\n {\n Name = \"tf-instance\",\n Clusters = new[]\n {\n new Gcp.BigTable.Inputs.InstanceClusterArgs\n {\n ClusterId = \"tf-instance-cluster\",\n Zone = \"us-central1-b\",\n NumNodes = 3,\n StorageType = \"HDD\",\n },\n },\n });\n\n var table = new Gcp.BigTable.Table(\"table\", new()\n {\n Name = \"tf-table\",\n InstanceName = instance.Name,\n SplitKeys = new[]\n {\n \"a\",\n \"b\",\n \"c\",\n },\n ColumnFamilies = new[]\n {\n new Gcp.BigTable.Inputs.TableColumnFamilyArgs\n {\n Family = \"family-first\",\n },\n new Gcp.BigTable.Inputs.TableColumnFamilyArgs\n {\n Family = \"family-second\",\n },\n },\n ChangeStreamRetention = \"24h0m0s\",\n AutomatedBackupPolicy = new Gcp.BigTable.Inputs.TableAutomatedBackupPolicyArgs\n {\n RetentionPeriod = \"72h0m0s\",\n Frequency = \"24h0m0s\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigtable\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tinstance, err := 
bigtable.NewInstance(ctx, \"instance\", \u0026bigtable.InstanceArgs{\n\t\t\tName: pulumi.String(\"tf-instance\"),\n\t\t\tClusters: bigtable.InstanceClusterArray{\n\t\t\t\t\u0026bigtable.InstanceClusterArgs{\n\t\t\t\t\tClusterId: pulumi.String(\"tf-instance-cluster\"),\n\t\t\t\t\tZone: pulumi.String(\"us-central1-b\"),\n\t\t\t\t\tNumNodes: pulumi.Int(3),\n\t\t\t\t\tStorageType: pulumi.String(\"HDD\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigtable.NewTable(ctx, \"table\", \u0026bigtable.TableArgs{\n\t\t\tName: pulumi.String(\"tf-table\"),\n\t\t\tInstanceName: instance.Name,\n\t\t\tSplitKeys: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"a\"),\n\t\t\t\tpulumi.String(\"b\"),\n\t\t\t\tpulumi.String(\"c\"),\n\t\t\t},\n\t\t\tColumnFamilies: bigtable.TableColumnFamilyArray{\n\t\t\t\t\u0026bigtable.TableColumnFamilyArgs{\n\t\t\t\t\tFamily: pulumi.String(\"family-first\"),\n\t\t\t\t},\n\t\t\t\t\u0026bigtable.TableColumnFamilyArgs{\n\t\t\t\t\tFamily: pulumi.String(\"family-second\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tChangeStreamRetention: pulumi.String(\"24h0m0s\"),\n\t\t\tAutomatedBackupPolicy: \u0026bigtable.TableAutomatedBackupPolicyArgs{\n\t\t\t\tRetentionPeriod: pulumi.String(\"72h0m0s\"),\n\t\t\t\tFrequency: pulumi.String(\"24h0m0s\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigtable.Instance;\nimport com.pulumi.gcp.bigtable.InstanceArgs;\nimport com.pulumi.gcp.bigtable.inputs.InstanceClusterArgs;\nimport com.pulumi.gcp.bigtable.Table;\nimport com.pulumi.gcp.bigtable.TableArgs;\nimport com.pulumi.gcp.bigtable.inputs.TableColumnFamilyArgs;\nimport com.pulumi.gcp.bigtable.inputs.TableAutomatedBackupPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var instance = new Instance(\"instance\", InstanceArgs.builder()\n .name(\"tf-instance\")\n .clusters(InstanceClusterArgs.builder()\n .clusterId(\"tf-instance-cluster\")\n .zone(\"us-central1-b\")\n .numNodes(3)\n .storageType(\"HDD\")\n .build())\n .build());\n\n var table = new Table(\"table\", TableArgs.builder()\n .name(\"tf-table\")\n .instanceName(instance.name())\n .splitKeys( \n \"a\",\n \"b\",\n \"c\")\n .columnFamilies( \n TableColumnFamilyArgs.builder()\n .family(\"family-first\")\n .build(),\n TableColumnFamilyArgs.builder()\n .family(\"family-second\")\n .build())\n .changeStreamRetention(\"24h0m0s\")\n .automatedBackupPolicy(TableAutomatedBackupPolicyArgs.builder()\n .retentionPeriod(\"72h0m0s\")\n .frequency(\"24h0m0s\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:bigtable:Instance\n properties:\n name: tf-instance\n clusters:\n - clusterId: tf-instance-cluster\n zone: us-central1-b\n numNodes: 3\n storageType: HDD\n table:\n type: gcp:bigtable:Table\n properties:\n name: tf-table\n instanceName: ${instance.name}\n splitKeys:\n - a\n - b\n - c\n columnFamilies:\n - family: family-first\n - family: family-second\n changeStreamRetention: 24h0m0s\n automatedBackupPolicy:\n retentionPeriod: 72h0m0s\n frequency: 24h0m0s\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\n-\u003e **Fields 
affected by import** The following fields can't be read and will show diffs if set in config when imported: `split_keys`\n\nBigtable Tables can be imported using any of these accepted formats:\n\n* `projects/{{project}}/instances/{{instance_name}}/tables/{{name}}`\n\n* `{{project}}/{{instance_name}}/{{name}}`\n\n* `{{instance_name}}/{{name}}`\n\nWhen using the `pulumi import` command, Bigtable Tables can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:bigtable/table:Table default projects/{{project}}/instances/{{instance_name}}/tables/{{name}}\n```\n\n```sh\n$ pulumi import gcp:bigtable/table:Table default {{project}}/{{instance_name}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:bigtable/table:Table default {{instance_name}}/{{name}}\n```\n\n", + "description": "Creates a Google Cloud Bigtable table inside an instance. For more information see\n[the official documentation](https://cloud.google.com/bigtable/) and\n[API](https://cloud.google.com/bigtable/docs/go/reference).\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst instance = new gcp.bigtable.Instance(\"instance\", {\n name: \"tf-instance\",\n clusters: [{\n clusterId: \"tf-instance-cluster\",\n zone: \"us-central1-b\",\n numNodes: 3,\n storageType: \"HDD\",\n }],\n});\nconst table = new gcp.bigtable.Table(\"table\", {\n name: \"tf-table\",\n instanceName: instance.name,\n splitKeys: [\n \"a\",\n \"b\",\n \"c\",\n ],\n columnFamilies: [\n {\n family: \"family-first\",\n },\n {\n family: \"family-second\",\n type: \"intsum\",\n },\n {\n family: \"family-third\",\n type: ` {\n\\x09\\x09\\x09\\x09\\x09\"aggregateType\": {\n\\x09\\x09\\x09\\x09\\x09\\x09\"max\": {},\n\\x09\\x09\\x09\\x09\\x09\\x09\"inputType\": {\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09\"int64Type\": {\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09\"encoding\": {\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09\"bigEndianBytes\": {}\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09}\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09}\n\\x09\\x09\\x09\\x09\\x09\\x09}\n\\x09\\x09\\x09\\x09\\x09}\n\\x09\\x09\\x09\\x09}\n`,\n },\n ],\n changeStreamRetention: \"24h0m0s\",\n automatedBackupPolicy: {\n retentionPeriod: \"72h0m0s\",\n frequency: \"24h0m0s\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ninstance = gcp.bigtable.Instance(\"instance\",\n name=\"tf-instance\",\n clusters=[{\n \"cluster_id\": \"tf-instance-cluster\",\n \"zone\": \"us-central1-b\",\n \"num_nodes\": 3,\n \"storage_type\": \"HDD\",\n }])\ntable = gcp.bigtable.Table(\"table\",\n name=\"tf-table\",\n instance_name=instance.name,\n split_keys=[\n \"a\",\n \"b\",\n \"c\",\n ],\n column_families=[\n {\n \"family\": \"family-first\",\n },\n {\n \"family\": \"family-second\",\n \"type\": \"intsum\",\n },\n {\n \"family\": \"family-third\",\n \"type\": \"\"\" {\n\\x09\\x09\\x09\\x09\\x09\"aggregateType\": {\n\\x09\\x09\\x09\\x09\\x09\\x09\"max\": {},\n\\x09\\x09\\x09\\x09\\x09\\x09\"inputType\": {\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09\"int64Type\": {\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09\"encoding\": {\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09\"bigEndianBytes\": {}\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09}\n\\x09\\x09\\x09\\x09\\x09\\x09\\x09}\n\\x09\\x09\\x09\\x09\\x09\\x09}\n\\x09\\x09\\x09\\x09\\x09}\n\\x09\\x09\\x09\\x09}\n\"\"\",\n },\n ],\n change_stream_retention=\"24h0m0s\",\n automated_backup_policy={\n \"retention_period\": \"72h0m0s\",\n 
\"frequency\": \"24h0m0s\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var instance = new Gcp.BigTable.Instance(\"instance\", new()\n {\n Name = \"tf-instance\",\n Clusters = new[]\n {\n new Gcp.BigTable.Inputs.InstanceClusterArgs\n {\n ClusterId = \"tf-instance-cluster\",\n Zone = \"us-central1-b\",\n NumNodes = 3,\n StorageType = \"HDD\",\n },\n },\n });\n\n var table = new Gcp.BigTable.Table(\"table\", new()\n {\n Name = \"tf-table\",\n InstanceName = instance.Name,\n SplitKeys = new[]\n {\n \"a\",\n \"b\",\n \"c\",\n },\n ColumnFamilies = new[]\n {\n new Gcp.BigTable.Inputs.TableColumnFamilyArgs\n {\n Family = \"family-first\",\n },\n new Gcp.BigTable.Inputs.TableColumnFamilyArgs\n {\n Family = \"family-second\",\n Type = \"intsum\",\n },\n new Gcp.BigTable.Inputs.TableColumnFamilyArgs\n {\n Family = \"family-third\",\n Type = @\" {\n\t\t\t\t\t\"\"aggregateType\"\": {\n\t\t\t\t\t\t\"\"max\"\": {},\n\t\t\t\t\t\t\"\"inputType\"\": {\n\t\t\t\t\t\t\t\"\"int64Type\"\": {\n\t\t\t\t\t\t\t\t\"\"encoding\"\": {\n\t\t\t\t\t\t\t\t\t\"\"bigEndianBytes\"\": {}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\",\n },\n },\n ChangeStreamRetention = \"24h0m0s\",\n AutomatedBackupPolicy = new Gcp.BigTable.Inputs.TableAutomatedBackupPolicyArgs\n {\n RetentionPeriod = \"72h0m0s\",\n Frequency = \"24h0m0s\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigtable\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tinstance, err := bigtable.NewInstance(ctx, \"instance\", \u0026bigtable.InstanceArgs{\n\t\t\tName: pulumi.String(\"tf-instance\"),\n\t\t\tClusters: bigtable.InstanceClusterArray{\n\t\t\t\t\u0026bigtable.InstanceClusterArgs{\n\t\t\t\t\tClusterId: pulumi.String(\"tf-instance-cluster\"),\n\t\t\t\t\tZone: pulumi.String(\"us-central1-b\"),\n\t\t\t\t\tNumNodes: pulumi.Int(3),\n\t\t\t\t\tStorageType: pulumi.String(\"HDD\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = bigtable.NewTable(ctx, \"table\", \u0026bigtable.TableArgs{\n\t\t\tName: pulumi.String(\"tf-table\"),\n\t\t\tInstanceName: instance.Name,\n\t\t\tSplitKeys: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"a\"),\n\t\t\t\tpulumi.String(\"b\"),\n\t\t\t\tpulumi.String(\"c\"),\n\t\t\t},\n\t\t\tColumnFamilies: bigtable.TableColumnFamilyArray{\n\t\t\t\t\u0026bigtable.TableColumnFamilyArgs{\n\t\t\t\t\tFamily: pulumi.String(\"family-first\"),\n\t\t\t\t},\n\t\t\t\t\u0026bigtable.TableColumnFamilyArgs{\n\t\t\t\t\tFamily: pulumi.String(\"family-second\"),\n\t\t\t\t\tType: pulumi.String(\"intsum\"),\n\t\t\t\t},\n\t\t\t\t\u0026bigtable.TableColumnFamilyArgs{\n\t\t\t\t\tFamily: pulumi.String(\"family-third\"),\n\t\t\t\t\tType: pulumi.String(` {\n\t\t\t\t\t\"aggregateType\": {\n\t\t\t\t\t\t\"max\": {},\n\t\t\t\t\t\t\"inputType\": {\n\t\t\t\t\t\t\t\"int64Type\": {\n\t\t\t\t\t\t\t\t\"encoding\": {\n\t\t\t\t\t\t\t\t\t\"bigEndianBytes\": {}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n`),\n\t\t\t\t},\n\t\t\t},\n\t\t\tChangeStreamRetention: pulumi.String(\"24h0m0s\"),\n\t\t\tAutomatedBackupPolicy: \u0026bigtable.TableAutomatedBackupPolicyArgs{\n\t\t\t\tRetentionPeriod: pulumi.String(\"72h0m0s\"),\n\t\t\t\tFrequency: pulumi.String(\"24h0m0s\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigtable.Instance;\nimport com.pulumi.gcp.bigtable.InstanceArgs;\nimport com.pulumi.gcp.bigtable.inputs.InstanceClusterArgs;\nimport com.pulumi.gcp.bigtable.Table;\nimport com.pulumi.gcp.bigtable.TableArgs;\nimport com.pulumi.gcp.bigtable.inputs.TableColumnFamilyArgs;\nimport com.pulumi.gcp.bigtable.inputs.TableAutomatedBackupPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var instance = new Instance(\"instance\", InstanceArgs.builder()\n .name(\"tf-instance\")\n .clusters(InstanceClusterArgs.builder()\n .clusterId(\"tf-instance-cluster\")\n .zone(\"us-central1-b\")\n .numNodes(3)\n .storageType(\"HDD\")\n .build())\n .build());\n\n var table = new Table(\"table\", TableArgs.builder()\n .name(\"tf-table\")\n .instanceName(instance.name())\n .splitKeys( \n \"a\",\n \"b\",\n \"c\")\n .columnFamilies( \n TableColumnFamilyArgs.builder()\n .family(\"family-first\")\n .build(),\n TableColumnFamilyArgs.builder()\n .family(\"family-second\")\n .type(\"intsum\")\n .build(),\n TableColumnFamilyArgs.builder()\n .family(\"family-third\")\n .type(\"\"\"\n {\n\t\t\t\t\t\"aggregateType\": {\n\t\t\t\t\t\t\"max\": {},\n\t\t\t\t\t\t\"inputType\": {\n\t\t\t\t\t\t\t\"int64Type\": {\n\t\t\t\t\t\t\t\t\"encoding\": {\n\t\t\t\t\t\t\t\t\t\"bigEndianBytes\": {}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n \"\"\")\n .build())\n .changeStreamRetention(\"24h0m0s\")\n .automatedBackupPolicy(TableAutomatedBackupPolicyArgs.builder()\n .retentionPeriod(\"72h0m0s\")\n .frequency(\"24h0m0s\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:bigtable:Instance\n properties:\n name: tf-instance\n clusters:\n - clusterId: tf-instance-cluster\n zone: us-central1-b\n numNodes: 3\n storageType: HDD\n table:\n type: gcp:bigtable:Table\n properties:\n name: tf-table\n instanceName: ${instance.name}\n splitKeys:\n - a\n - b\n - c\n columnFamilies:\n - family: family-first\n - family: family-second\n type: intsum\n - family: family-third\n type: |2\n {\n \t\t\t\t\t\"aggregateType\": {\n \t\t\t\t\t\t\"max\": {},\n \t\t\t\t\t\t\"inputType\": {\n \t\t\t\t\t\t\t\"int64Type\": {\n \t\t\t\t\t\t\t\t\"encoding\": {\n \t\t\t\t\t\t\t\t\t\"bigEndianBytes\": {}\n \t\t\t\t\t\t\t\t}\n \t\t\t\t\t\t\t}\n \t\t\t\t\t\t}\n \t\t\t\t\t}\n \t\t\t\t}\n changeStreamRetention: 24h0m0s\n automatedBackupPolicy:\n retentionPeriod: 72h0m0s\n frequency: 24h0m0s\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\n-\u003e **Fields affected by import** The following fields can't be read and will show diffs if set in config when imported: `split_keys`\n\nBigtable Tables can be imported using any of these accepted formats:\n\n* `projects/{{project}}/instances/{{instance_name}}/tables/{{name}}`\n\n* `{{project}}/{{instance_name}}/{{name}}`\n\n* `{{instance_name}}/{{name}}`\n\nWhen using the `pulumi import` command, Bigtable Tables can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:bigtable/table:Table default projects/{{project}}/instances/{{instance_name}}/tables/{{name}}\n```\n\n```sh\n$ pulumi import gcp:bigtable/table:Table default {{project}}/{{instance_name}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:bigtable/table:Table default {{instance_name}}/{{name}}\n```\n\n", "properties": { "automatedBackupPolicy": { "$ref": "#/types/gcp:bigtable/TableAutomatedBackupPolicy:TableAutomatedBackupPolicy", @@ -131021,7 +132539,7 @@ }, "desiredState": { "type": "string", - "description": "Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA.\n" + "description": "Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values:\nENABLED, DISABLED, STAGED.\n" }, "effectiveLabels": { "type": "object", @@ -131141,7 +132659,7 @@ }, "desiredState": { "type": "string", - "description": "Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA.\n" + "description": "Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values:\nENABLED, DISABLED, STAGED.\n" }, "gcsBucket": { "type": "string", @@ -131237,7 +132755,7 @@ }, "desiredState": { "type": "string", - "description": "Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA.\n" + "description": "Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values:\nENABLED, DISABLED, STAGED.\n" }, "effectiveLabels": { "type": "object", @@ -132678,6 +134196,13 @@ "description": "The combination of labels configured directly on the resource\nand default labels configured on the provider.\n", "secret": true }, + "sanDnsnames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6)\n" + }, "scope": { "type": "string", "description": "The scope of the certificate.\nDEFAULT: Certificates with default scope are served from core Google data centers.\nIf unsure, choose this option.\nEDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence.\nSee https://cloud.google.com/vpc/docs/edge-locations.\nALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs).\nSee https://cloud.google.com/compute/docs/regions-zones\n" @@ -132691,6 +134216,7 @@ "effectiveLabels", "name", "project", + "sanDnsnames", "pulumiLabels" ], "inputProperties": { @@ -132786,6 +134312,13 @@ "description": "The combination of labels configured directly on the resource\nand default labels configured on the provider.\n", "secret": true }, + "sanDnsnames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6)\n" + }, "scope": { "type": "string", "description": "The scope of the certificate.\nDEFAULT: Certificates with default scope are served from core Google data centers.\nIf unsure, choose this option.\nEDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence.\nSee https://cloud.google.com/vpc/docs/edge-locations.\nALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use 
ALL_REGIONS with global certs).\nSee https://cloud.google.com/compute/docs/regions-zones\n", @@ -141043,7 +142576,7 @@ } }, "gcp:cloudrunv2/service:Service": { - "description": "Service acts as a top-level container that manages a set of configurations and revision templates which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership.\n\n\nTo get more information about Service, see:\n\n* [API documentation](https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.services)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/run/docs/)\n\n## Example Usage\n\n### Cloudrunv2 Service Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n 
Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Limits\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n resources: {\n limits: {\n cpu: \"2\",\n memory: \"1024Mi\",\n },\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"resources\": {\n \"limits\": {\n \"cpu\": \"2\",\n \"memory\": \"1024Mi\",\n },\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n Resources = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerResourcesArgs\n {\n Limits = \n {\n { \"cpu\", \"2\" },\n { \"memory\", \"1024Mi\" },\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tResources: \u0026cloudrunv2.ServiceTemplateContainerResourcesArgs{\n\t\t\t\t\t\t\tLimits: pulumi.StringMap{\n\t\t\t\t\t\t\t\t\"cpu\": pulumi.String(\"2\"),\n\t\t\t\t\t\t\t\t\"memory\": 
pulumi.String(\"1024Mi\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .resources(ServiceTemplateContainerResourcesArgs.builder()\n .limits(Map.ofEntries(\n Map.entry(\"cpu\", \"2\"),\n Map.entry(\"memory\", \"1024Mi\")\n ))\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n resources:\n limits:\n cpu: '2'\n memory: 1024Mi\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Sql\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst secret = new gcp.secretmanager.Secret(\"secret\", {\n secretId: \"secret-1\",\n replication: {\n auto: {},\n },\n});\nconst secret_version_data = new gcp.secretmanager.SecretVersion(\"secret-version-data\", {\n secret: secret.name,\n secretData: \"secret-data\",\n});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"cloudrun-sql\",\n region: \"us-central1\",\n databaseVersion: \"MYSQL_5_7\",\n settings: {\n tier: \"db-f1-micro\",\n },\n deletionProtection: true,\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n scaling: {\n maxInstanceCount: 2,\n },\n volumes: [{\n name: \"cloudsql\",\n cloudSqlInstance: {\n instances: [instance.connectionName],\n },\n }],\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n envs: [\n {\n name: \"FOO\",\n value: \"bar\",\n },\n {\n name: \"SECRET_ENV_VAR\",\n valueSource: {\n secretKeyRef: {\n secret: secret.secretId,\n version: \"1\",\n },\n },\n },\n ],\n volumeMounts: [{\n name: \"cloudsql\",\n mountPath: \"/cloudsql\",\n }],\n }],\n },\n traffics: [{\n type: \"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\",\n percent: 100,\n }],\n}, {\n dependsOn: [secret_version_data],\n});\nconst project = gcp.organizations.getProject({});\nconst secret_access = new gcp.secretmanager.SecretIamMember(\"secret-access\", {\n secretId: secret.id,\n role: \"roles/secretmanager.secretAccessor\",\n member: project.then(project =\u003e `serviceAccount:${project.number}-compute@developer.gserviceaccount.com`),\n}, {\n dependsOn: 
[secret],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nsecret = gcp.secretmanager.Secret(\"secret\",\n secret_id=\"secret-1\",\n replication={\n \"auto\": {},\n })\nsecret_version_data = gcp.secretmanager.SecretVersion(\"secret-version-data\",\n secret=secret.name,\n secret_data=\"secret-data\")\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"cloudrun-sql\",\n region=\"us-central1\",\n database_version=\"MYSQL_5_7\",\n settings={\n \"tier\": \"db-f1-micro\",\n },\n deletion_protection=True)\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"scaling\": {\n \"max_instance_count\": 2,\n },\n \"volumes\": [{\n \"name\": \"cloudsql\",\n \"cloud_sql_instance\": {\n \"instances\": [instance.connection_name],\n },\n }],\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"envs\": [\n {\n \"name\": \"FOO\",\n \"value\": \"bar\",\n },\n {\n \"name\": \"SECRET_ENV_VAR\",\n \"value_source\": {\n \"secret_key_ref\": {\n \"secret\": secret.secret_id,\n \"version\": \"1\",\n },\n },\n },\n ],\n \"volume_mounts\": [{\n \"name\": \"cloudsql\",\n \"mount_path\": \"/cloudsql\",\n }],\n }],\n },\n traffics=[{\n \"type\": \"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\",\n \"percent\": 100,\n }],\n opts = pulumi.ResourceOptions(depends_on=[secret_version_data]))\nproject = gcp.organizations.get_project()\nsecret_access = gcp.secretmanager.SecretIamMember(\"secret-access\",\n secret_id=secret.id,\n role=\"roles/secretmanager.secretAccessor\",\n member=f\"serviceAccount:{project.number}-compute@developer.gserviceaccount.com\",\n opts = pulumi.ResourceOptions(depends_on=[secret]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var secret = new Gcp.SecretManager.Secret(\"secret\", new()\n {\n SecretId = \"secret-1\",\n Replication = new Gcp.SecretManager.Inputs.SecretReplicationArgs\n {\n Auto = null,\n },\n });\n\n var secret_version_data = new Gcp.SecretManager.SecretVersion(\"secret-version-data\", new()\n {\n Secret = secret.Name,\n SecretData = \"secret-data\",\n });\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"cloudrun-sql\",\n Region = \"us-central1\",\n DatabaseVersion = \"MYSQL_5_7\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n },\n DeletionProtection = true,\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Scaling = new Gcp.CloudRunV2.Inputs.ServiceTemplateScalingArgs\n {\n MaxInstanceCount = 2,\n },\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"cloudsql\",\n CloudSqlInstance = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeCloudSqlInstanceArgs\n {\n Instances = new[]\n {\n instance.ConnectionName,\n },\n },\n },\n },\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n Envs = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvArgs\n {\n Name = \"FOO\",\n Value = \"bar\",\n },\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvArgs\n {\n Name = 
\"SECRET_ENV_VAR\",\n ValueSource = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvValueSourceArgs\n {\n SecretKeyRef = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvValueSourceSecretKeyRefArgs\n {\n Secret = secret.SecretId,\n Version = \"1\",\n },\n },\n },\n },\n VolumeMounts = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"cloudsql\",\n MountPath = \"/cloudsql\",\n },\n },\n },\n },\n },\n Traffics = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTrafficArgs\n {\n Type = \"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\",\n Percent = 100,\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n secret_version_data,\n },\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var secret_access = new Gcp.SecretManager.SecretIamMember(\"secret-access\", new()\n {\n SecretId = secret.Id,\n Role = \"roles/secretmanager.secretAccessor\",\n Member = $\"serviceAccount:{project.Apply(getProjectResult =\u003e getProjectResult.Number)}-compute@developer.gserviceaccount.com\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n secret,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/secretmanager\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsecret, err := secretmanager.NewSecret(ctx, \"secret\", \u0026secretmanager.SecretArgs{\n\t\t\tSecretId: pulumi.String(\"secret-1\"),\n\t\t\tReplication: \u0026secretmanager.SecretReplicationArgs{\n\t\t\t\tAuto: nil,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = secretmanager.NewSecretVersion(ctx, \"secret-version-data\", \u0026secretmanager.SecretVersionArgs{\n\t\t\tSecret: secret.Name,\n\t\t\tSecretData: pulumi.String(\"secret-data\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-sql\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_5_7\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-f1-micro\"),\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tScaling: \u0026cloudrunv2.ServiceTemplateScalingArgs{\n\t\t\t\t\tMaxInstanceCount: pulumi.Int(2),\n\t\t\t\t},\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"cloudsql\"),\n\t\t\t\t\t\tCloudSqlInstance: \u0026cloudrunv2.ServiceTemplateVolumeCloudSqlInstanceArgs{\n\t\t\t\t\t\t\tInstances: pulumi.StringArray{\n\t\t\t\t\t\t\t\tinstance.ConnectionName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: 
pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tEnvs: cloudrunv2.ServiceTemplateContainerEnvArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerEnvArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"FOO\"),\n\t\t\t\t\t\t\t\tValue: pulumi.String(\"bar\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerEnvArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"SECRET_ENV_VAR\"),\n\t\t\t\t\t\t\t\tValueSource: \u0026cloudrunv2.ServiceTemplateContainerEnvValueSourceArgs{\n\t\t\t\t\t\t\t\t\tSecretKeyRef: \u0026cloudrunv2.ServiceTemplateContainerEnvValueSourceSecretKeyRefArgs{\n\t\t\t\t\t\t\t\t\t\tSecret: secret.SecretId,\n\t\t\t\t\t\t\t\t\t\tVersion: pulumi.String(\"1\"),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"cloudsql\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/cloudsql\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tTraffics: cloudrunv2.ServiceTrafficArray{\n\t\t\t\t\u0026cloudrunv2.ServiceTrafficArgs{\n\t\t\t\t\tType: pulumi.String(\"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\"),\n\t\t\t\t\tPercent: pulumi.Int(100),\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsecret_version_data,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = secretmanager.NewSecretIamMember(ctx, \"secret-access\", \u0026secretmanager.SecretIamMemberArgs{\n\t\t\tSecretId: secret.ID(),\n\t\t\tRole: pulumi.String(\"roles/secretmanager.secretAccessor\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:%v-compute@developer.gserviceaccount.com\", project.Number),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsecret,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.secretmanager.Secret;\nimport com.pulumi.gcp.secretmanager.SecretArgs;\nimport com.pulumi.gcp.secretmanager.inputs.SecretReplicationArgs;\nimport com.pulumi.gcp.secretmanager.inputs.SecretReplicationAutoArgs;\nimport com.pulumi.gcp.secretmanager.SecretVersion;\nimport com.pulumi.gcp.secretmanager.SecretVersionArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateScalingArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTrafficArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.secretmanager.SecretIamMember;\nimport com.pulumi.gcp.secretmanager.SecretIamMemberArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var 
secret = new Secret(\"secret\", SecretArgs.builder()\n .secretId(\"secret-1\")\n .replication(SecretReplicationArgs.builder()\n .auto()\n .build())\n .build());\n\n var secret_version_data = new SecretVersion(\"secret-version-data\", SecretVersionArgs.builder()\n .secret(secret.name())\n .secretData(\"secret-data\")\n .build());\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"cloudrun-sql\")\n .region(\"us-central1\")\n .databaseVersion(\"MYSQL_5_7\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .build())\n .deletionProtection(\"true\")\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .scaling(ServiceTemplateScalingArgs.builder()\n .maxInstanceCount(2)\n .build())\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"cloudsql\")\n .cloudSqlInstance(ServiceTemplateVolumeCloudSqlInstanceArgs.builder()\n .instances(instance.connectionName())\n .build())\n .build())\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .envs( \n ServiceTemplateContainerEnvArgs.builder()\n .name(\"FOO\")\n .value(\"bar\")\n .build(),\n ServiceTemplateContainerEnvArgs.builder()\n .name(\"SECRET_ENV_VAR\")\n .valueSource(ServiceTemplateContainerEnvValueSourceArgs.builder()\n .secretKeyRef(ServiceTemplateContainerEnvValueSourceSecretKeyRefArgs.builder()\n .secret(secret.secretId())\n .version(\"1\")\n .build())\n .build())\n .build())\n .volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"cloudsql\")\n .mountPath(\"/cloudsql\")\n .build())\n .build())\n .build())\n .traffics(ServiceTrafficArgs.builder()\n .type(\"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\")\n .percent(100)\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(secret_version_data)\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var secret_access = new SecretIamMember(\"secret-access\", SecretIamMemberArgs.builder()\n .secretId(secret.id())\n .role(\"roles/secretmanager.secretAccessor\")\n .member(String.format(\"serviceAccount:%s-compute@developer.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build(), CustomResourceOptions.builder()\n .dependsOn(secret)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n template:\n scaling:\n maxInstanceCount: 2\n volumes:\n - name: cloudsql\n cloudSqlInstance:\n instances:\n - ${instance.connectionName}\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n envs:\n - name: FOO\n value: bar\n - name: SECRET_ENV_VAR\n valueSource:\n secretKeyRef:\n secret: ${secret.secretId}\n version: '1'\n volumeMounts:\n - name: cloudsql\n mountPath: /cloudsql\n traffics:\n - type: TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\n percent: 100\n options:\n dependson:\n - ${[\"secret-version-data\"]}\n secret:\n type: gcp:secretmanager:Secret\n properties:\n secretId: secret-1\n replication:\n auto: {}\n secret-version-data:\n type: gcp:secretmanager:SecretVersion\n properties:\n secret: ${secret.name}\n secretData: secret-data\n secret-access:\n type: gcp:secretmanager:SecretIamMember\n properties:\n secretId: 
${secret.id}\n role: roles/secretmanager.secretAccessor\n member: serviceAccount:${project.number}-compute@developer.gserviceaccount.com\n options:\n dependson:\n - ${secret}\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: cloudrun-sql\n region: us-central1\n databaseVersion: MYSQL_5_7\n settings:\n tier: db-f1-micro\n deletionProtection: 'true'\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Vpcaccess\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst customTestNetwork = new gcp.compute.Network(\"custom_test\", {\n name: \"run-network\",\n autoCreateSubnetworks: false,\n});\nconst customTest = new gcp.compute.Subnetwork(\"custom_test\", {\n name: \"run-subnetwork\",\n ipCidrRange: \"10.2.0.0/28\",\n region: \"us-central1\",\n network: customTestNetwork.id,\n});\nconst connector = new gcp.vpcaccess.Connector(\"connector\", {\n name: \"run-vpc\",\n subnet: {\n name: customTest.name,\n },\n machineType: \"e2-standard-4\",\n minInstances: 2,\n maxInstances: 3,\n region: \"us-central1\",\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n vpcAccess: {\n connector: connector.id,\n egress: \"ALL_TRAFFIC\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncustom_test_network = gcp.compute.Network(\"custom_test\",\n name=\"run-network\",\n auto_create_subnetworks=False)\ncustom_test = gcp.compute.Subnetwork(\"custom_test\",\n name=\"run-subnetwork\",\n ip_cidr_range=\"10.2.0.0/28\",\n region=\"us-central1\",\n network=custom_test_network.id)\nconnector = gcp.vpcaccess.Connector(\"connector\",\n name=\"run-vpc\",\n subnet={\n \"name\": custom_test.name,\n },\n machine_type=\"e2-standard-4\",\n min_instances=2,\n max_instances=3,\n region=\"us-central1\")\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n \"vpc_access\": {\n \"connector\": connector.id,\n \"egress\": \"ALL_TRAFFIC\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var customTestNetwork = new Gcp.Compute.Network(\"custom_test\", new()\n {\n Name = \"run-network\",\n AutoCreateSubnetworks = false,\n });\n\n var customTest = new Gcp.Compute.Subnetwork(\"custom_test\", new()\n {\n Name = \"run-subnetwork\",\n IpCidrRange = \"10.2.0.0/28\",\n Region = \"us-central1\",\n Network = customTestNetwork.Id,\n });\n\n var connector = new Gcp.VpcAccess.Connector(\"connector\", new()\n {\n Name = \"run-vpc\",\n Subnet = new Gcp.VpcAccess.Inputs.ConnectorSubnetArgs\n {\n Name = customTest.Name,\n },\n MachineType = \"e2-standard-4\",\n MinInstances = 2,\n MaxInstances = 3,\n Region = \"us-central1\",\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new 
Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n },\n },\n VpcAccess = new Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessArgs\n {\n Connector = connector.Id,\n Egress = \"ALL_TRAFFIC\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/vpcaccess\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcustomTestNetwork, err := compute.NewNetwork(ctx, \"custom_test\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"run-network\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcustomTest, err := compute.NewSubnetwork(ctx, \"custom_test\", \u0026compute.SubnetworkArgs{\n\t\t\tName: pulumi.String(\"run-subnetwork\"),\n\t\t\tIpCidrRange: pulumi.String(\"10.2.0.0/28\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNetwork: customTestNetwork.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconnector, err := vpcaccess.NewConnector(ctx, \"connector\", \u0026vpcaccess.ConnectorArgs{\n\t\t\tName: pulumi.String(\"run-vpc\"),\n\t\t\tSubnet: \u0026vpcaccess.ConnectorSubnetArgs{\n\t\t\t\tName: customTest.Name,\n\t\t\t},\n\t\t\tMachineType: pulumi.String(\"e2-standard-4\"),\n\t\t\tMinInstances: pulumi.Int(2),\n\t\t\tMaxInstances: pulumi.Int(3),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVpcAccess: \u0026cloudrunv2.ServiceTemplateVpcAccessArgs{\n\t\t\t\t\tConnector: connector.ID(),\n\t\t\t\t\tEgress: pulumi.String(\"ALL_TRAFFIC\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.Subnetwork;\nimport com.pulumi.gcp.compute.SubnetworkArgs;\nimport com.pulumi.gcp.vpcaccess.Connector;\nimport com.pulumi.gcp.vpcaccess.ConnectorArgs;\nimport com.pulumi.gcp.vpcaccess.inputs.ConnectorSubnetArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateVpcAccessArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var customTestNetwork = new Network(\"customTestNetwork\", NetworkArgs.builder()\n .name(\"run-network\")\n .autoCreateSubnetworks(false)\n 
.build());\n\n var customTest = new Subnetwork(\"customTest\", SubnetworkArgs.builder()\n .name(\"run-subnetwork\")\n .ipCidrRange(\"10.2.0.0/28\")\n .region(\"us-central1\")\n .network(customTestNetwork.id())\n .build());\n\n var connector = new Connector(\"connector\", ConnectorArgs.builder()\n .name(\"run-vpc\")\n .subnet(ConnectorSubnetArgs.builder()\n .name(customTest.name())\n .build())\n .machineType(\"e2-standard-4\")\n .minInstances(2)\n .maxInstances(3)\n .region(\"us-central1\")\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .build())\n .vpcAccess(ServiceTemplateVpcAccessArgs.builder()\n .connector(connector.id())\n .egress(\"ALL_TRAFFIC\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n vpcAccess:\n connector: ${connector.id}\n egress: ALL_TRAFFIC\n connector:\n type: gcp:vpcaccess:Connector\n properties:\n name: run-vpc\n subnet:\n name: ${customTest.name}\n machineType: e2-standard-4\n minInstances: 2\n maxInstances: 3\n region: us-central1\n customTest:\n type: gcp:compute:Subnetwork\n name: custom_test\n properties:\n name: run-subnetwork\n ipCidrRange: 10.2.0.0/28\n region: us-central1\n network: ${customTestNetwork.id}\n customTestNetwork:\n type: gcp:compute:Network\n name: custom_test\n properties:\n name: run-network\n autoCreateSubnetworks: false\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Directvpc\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n launchStage: \"GA\",\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n vpcAccess: {\n networkInterfaces: [{\n network: \"default\",\n subnetwork: \"default\",\n tags: [\n \"tag1\",\n \"tag2\",\n \"tag3\",\n ],\n }],\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n launch_stage=\"GA\",\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n \"vpc_access\": {\n \"network_interfaces\": [{\n \"network\": \"default\",\n \"subnetwork\": \"default\",\n \"tags\": [\n \"tag1\",\n \"tag2\",\n \"tag3\",\n ],\n }],\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n LaunchStage = \"GA\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n },\n },\n VpcAccess = new 
Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessArgs\n {\n NetworkInterfaces = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessNetworkInterfaceArgs\n {\n Network = \"default\",\n Subnetwork = \"default\",\n Tags = new[]\n {\n \"tag1\",\n \"tag2\",\n \"tag3\",\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tLaunchStage: pulumi.String(\"GA\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVpcAccess: \u0026cloudrunv2.ServiceTemplateVpcAccessArgs{\n\t\t\t\t\tNetworkInterfaces: cloudrunv2.ServiceTemplateVpcAccessNetworkInterfaceArray{\n\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVpcAccessNetworkInterfaceArgs{\n\t\t\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t\t\t\tSubnetwork: pulumi.String(\"default\"),\n\t\t\t\t\t\t\tTags: pulumi.StringArray{\n\t\t\t\t\t\t\t\tpulumi.String(\"tag1\"),\n\t\t\t\t\t\t\t\tpulumi.String(\"tag2\"),\n\t\t\t\t\t\t\t\tpulumi.String(\"tag3\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateVpcAccessArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .launchStage(\"GA\")\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .build())\n .vpcAccess(ServiceTemplateVpcAccessArgs.builder()\n .networkInterfaces(ServiceTemplateVpcAccessNetworkInterfaceArgs.builder()\n .network(\"default\")\n .subnetwork(\"default\")\n .tags( \n \"tag1\",\n \"tag2\",\n \"tag3\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n launchStage: GA\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n vpcAccess:\n networkInterfaces:\n - network: default\n subnetwork: default\n tags:\n - tag1\n - tag2\n - tag3\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Probes\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from 
\"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n startupProbe: {\n initialDelaySeconds: 0,\n timeoutSeconds: 1,\n periodSeconds: 3,\n failureThreshold: 1,\n tcpSocket: {\n port: 8080,\n },\n },\n livenessProbe: {\n httpGet: {\n path: \"/\",\n },\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"startup_probe\": {\n \"initial_delay_seconds\": 0,\n \"timeout_seconds\": 1,\n \"period_seconds\": 3,\n \"failure_threshold\": 1,\n \"tcp_socket\": {\n \"port\": 8080,\n },\n },\n \"liveness_probe\": {\n \"http_get\": {\n \"path\": \"/\",\n },\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n StartupProbe = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerStartupProbeArgs\n {\n InitialDelaySeconds = 0,\n TimeoutSeconds = 1,\n PeriodSeconds = 3,\n FailureThreshold = 1,\n TcpSocket = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerStartupProbeTcpSocketArgs\n {\n Port = 8080,\n },\n },\n LivenessProbe = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerLivenessProbeArgs\n {\n HttpGet = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerLivenessProbeHttpGetArgs\n {\n Path = \"/\",\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tStartupProbe: \u0026cloudrunv2.ServiceTemplateContainerStartupProbeArgs{\n\t\t\t\t\t\t\tInitialDelaySeconds: pulumi.Int(0),\n\t\t\t\t\t\t\tTimeoutSeconds: pulumi.Int(1),\n\t\t\t\t\t\t\tPeriodSeconds: pulumi.Int(3),\n\t\t\t\t\t\t\tFailureThreshold: pulumi.Int(1),\n\t\t\t\t\t\t\tTcpSocket: \u0026cloudrunv2.ServiceTemplateContainerStartupProbeTcpSocketArgs{\n\t\t\t\t\t\t\t\tPort: pulumi.Int(8080),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tLivenessProbe: \u0026cloudrunv2.ServiceTemplateContainerLivenessProbeArgs{\n\t\t\t\t\t\t\tHttpGet: \u0026cloudrunv2.ServiceTemplateContainerLivenessProbeHttpGetArgs{\n\t\t\t\t\t\t\t\tPath: 
pulumi.String(\"/\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .startupProbe(ServiceTemplateContainerStartupProbeArgs.builder()\n .initialDelaySeconds(0)\n .timeoutSeconds(1)\n .periodSeconds(3)\n .failureThreshold(1)\n .tcpSocket(ServiceTemplateContainerStartupProbeTcpSocketArgs.builder()\n .port(8080)\n .build())\n .build())\n .livenessProbe(ServiceTemplateContainerLivenessProbeArgs.builder()\n .httpGet(ServiceTemplateContainerLivenessProbeHttpGetArgs.builder()\n .path(\"/\")\n .build())\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n startupProbe:\n initialDelaySeconds: 0\n timeoutSeconds: 1\n periodSeconds: 3\n failureThreshold: 1\n tcpSocket:\n port: 8080\n livenessProbe:\n httpGet:\n path: /\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Secret\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst secret = new gcp.secretmanager.Secret(\"secret\", {\n secretId: \"secret-1\",\n replication: {\n auto: {},\n },\n});\nconst secret_version_data = new gcp.secretmanager.SecretVersion(\"secret-version-data\", {\n secret: secret.name,\n secretData: \"secret-data\",\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n volumes: [{\n name: \"a-volume\",\n secret: {\n secret: secret.secretId,\n defaultMode: 292,\n items: [{\n version: \"1\",\n path: \"my-secret\",\n }],\n },\n }],\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n volumeMounts: [{\n name: \"a-volume\",\n mountPath: \"/secrets\",\n }],\n }],\n },\n}, {\n dependsOn: [secret_version_data],\n});\nconst project = gcp.organizations.getProject({});\nconst secret_access = new gcp.secretmanager.SecretIamMember(\"secret-access\", {\n secretId: secret.id,\n role: \"roles/secretmanager.secretAccessor\",\n member: project.then(project =\u003e `serviceAccount:${project.number}-compute@developer.gserviceaccount.com`),\n}, {\n dependsOn: [secret],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nsecret = gcp.secretmanager.Secret(\"secret\",\n secret_id=\"secret-1\",\n replication={\n \"auto\": {},\n 
})\nsecret_version_data = gcp.secretmanager.SecretVersion(\"secret-version-data\",\n secret=secret.name,\n secret_data=\"secret-data\")\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"volumes\": [{\n \"name\": \"a-volume\",\n \"secret\": {\n \"secret\": secret.secret_id,\n \"default_mode\": 292,\n \"items\": [{\n \"version\": \"1\",\n \"path\": \"my-secret\",\n }],\n },\n }],\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"volume_mounts\": [{\n \"name\": \"a-volume\",\n \"mount_path\": \"/secrets\",\n }],\n }],\n },\n opts = pulumi.ResourceOptions(depends_on=[secret_version_data]))\nproject = gcp.organizations.get_project()\nsecret_access = gcp.secretmanager.SecretIamMember(\"secret-access\",\n secret_id=secret.id,\n role=\"roles/secretmanager.secretAccessor\",\n member=f\"serviceAccount:{project.number}-compute@developer.gserviceaccount.com\",\n opts = pulumi.ResourceOptions(depends_on=[secret]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var secret = new Gcp.SecretManager.Secret(\"secret\", new()\n {\n SecretId = \"secret-1\",\n Replication = new Gcp.SecretManager.Inputs.SecretReplicationArgs\n {\n Auto = null,\n },\n });\n\n var secret_version_data = new Gcp.SecretManager.SecretVersion(\"secret-version-data\", new()\n {\n Secret = secret.Name,\n SecretData = \"secret-data\",\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"a-volume\",\n Secret = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeSecretArgs\n {\n Secret = secret.SecretId,\n DefaultMode = 292,\n Items = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeSecretItemArgs\n {\n Version = \"1\",\n Path = \"my-secret\",\n },\n },\n },\n },\n },\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n VolumeMounts = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"a-volume\",\n MountPath = \"/secrets\",\n },\n },\n },\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n secret_version_data,\n },\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var secret_access = new Gcp.SecretManager.SecretIamMember(\"secret-access\", new()\n {\n SecretId = secret.Id,\n Role = \"roles/secretmanager.secretAccessor\",\n Member = $\"serviceAccount:{project.Apply(getProjectResult =\u003e getProjectResult.Number)}-compute@developer.gserviceaccount.com\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n secret,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/secretmanager\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsecret, err := secretmanager.NewSecret(ctx, \"secret\", \u0026secretmanager.SecretArgs{\n\t\t\tSecretId: 
pulumi.String(\"secret-1\"),\n\t\t\tReplication: \u0026secretmanager.SecretReplicationArgs{\n\t\t\t\tAuto: nil,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = secretmanager.NewSecretVersion(ctx, \"secret-version-data\", \u0026secretmanager.SecretVersionArgs{\n\t\t\tSecret: secret.Name,\n\t\t\tSecretData: pulumi.String(\"secret-data\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"a-volume\"),\n\t\t\t\t\t\tSecret: \u0026cloudrunv2.ServiceTemplateVolumeSecretArgs{\n\t\t\t\t\t\t\tSecret: secret.SecretId,\n\t\t\t\t\t\t\tDefaultMode: pulumi.Int(292),\n\t\t\t\t\t\t\tItems: cloudrunv2.ServiceTemplateVolumeSecretItemArray{\n\t\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeSecretItemArgs{\n\t\t\t\t\t\t\t\t\tVersion: pulumi.String(\"1\"),\n\t\t\t\t\t\t\t\t\tPath: pulumi.String(\"my-secret\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"a-volume\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/secrets\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsecret_version_data,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = secretmanager.NewSecretIamMember(ctx, \"secret-access\", \u0026secretmanager.SecretIamMemberArgs{\n\t\t\tSecretId: secret.ID(),\n\t\t\tRole: pulumi.String(\"roles/secretmanager.secretAccessor\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:%v-compute@developer.gserviceaccount.com\", project.Number),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsecret,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.secretmanager.Secret;\nimport com.pulumi.gcp.secretmanager.SecretArgs;\nimport com.pulumi.gcp.secretmanager.inputs.SecretReplicationArgs;\nimport com.pulumi.gcp.secretmanager.inputs.SecretReplicationAutoArgs;\nimport com.pulumi.gcp.secretmanager.SecretVersion;\nimport com.pulumi.gcp.secretmanager.SecretVersionArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.secretmanager.SecretIamMember;\nimport com.pulumi.gcp.secretmanager.SecretIamMemberArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport 
java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var secret = new Secret(\"secret\", SecretArgs.builder()\n .secretId(\"secret-1\")\n .replication(SecretReplicationArgs.builder()\n .auto()\n .build())\n .build());\n\n var secret_version_data = new SecretVersion(\"secret-version-data\", SecretVersionArgs.builder()\n .secret(secret.name())\n .secretData(\"secret-data\")\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"a-volume\")\n .secret(ServiceTemplateVolumeSecretArgs.builder()\n .secret(secret.secretId())\n .defaultMode(292)\n .items(ServiceTemplateVolumeSecretItemArgs.builder()\n .version(\"1\")\n .path(\"my-secret\")\n .build())\n .build())\n .build())\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"a-volume\")\n .mountPath(\"/secrets\")\n .build())\n .build())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(secret_version_data)\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var secret_access = new SecretIamMember(\"secret-access\", SecretIamMemberArgs.builder()\n .secretId(secret.id())\n .role(\"roles/secretmanager.secretAccessor\")\n .member(String.format(\"serviceAccount:%s-compute@developer.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build(), CustomResourceOptions.builder()\n .dependsOn(secret)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n template:\n volumes:\n - name: a-volume\n secret:\n secret: ${secret.secretId}\n defaultMode: 292\n items:\n - version: '1'\n path: my-secret\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n volumeMounts:\n - name: a-volume\n mountPath: /secrets\n options:\n dependson:\n - ${[\"secret-version-data\"]}\n secret:\n type: gcp:secretmanager:Secret\n properties:\n secretId: secret-1\n replication:\n auto: {}\n secret-version-data:\n type: gcp:secretmanager:SecretVersion\n properties:\n secret: ${secret.name}\n secretData: secret-data\n secret-access:\n type: gcp:secretmanager:SecretIamMember\n properties:\n secretId: ${secret.id}\n role: roles/secretmanager.secretAccessor\n member: serviceAccount:${project.number}-compute@developer.gserviceaccount.com\n options:\n dependson:\n - ${secret}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Multicontainer\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n launchStage: \"BETA\",\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n containers: 
[\n {\n name: \"hello-1\",\n ports: {\n containerPort: 8080,\n },\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n dependsOns: [\"hello-2\"],\n volumeMounts: [{\n name: \"empty-dir-volume\",\n mountPath: \"/mnt\",\n }],\n },\n {\n name: \"hello-2\",\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n envs: [{\n name: \"PORT\",\n value: \"8081\",\n }],\n startupProbe: {\n httpGet: {\n port: 8081,\n },\n },\n },\n ],\n volumes: [{\n name: \"empty-dir-volume\",\n emptyDir: {\n medium: \"MEMORY\",\n sizeLimit: \"256Mi\",\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n launch_stage=\"BETA\",\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"containers\": [\n {\n \"name\": \"hello-1\",\n \"ports\": {\n \"container_port\": 8080,\n },\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"depends_ons\": [\"hello-2\"],\n \"volume_mounts\": [{\n \"name\": \"empty-dir-volume\",\n \"mount_path\": \"/mnt\",\n }],\n },\n {\n \"name\": \"hello-2\",\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"envs\": [{\n \"name\": \"PORT\",\n \"value\": \"8081\",\n }],\n \"startup_probe\": {\n \"http_get\": {\n \"port\": 8081,\n },\n },\n },\n ],\n \"volumes\": [{\n \"name\": \"empty-dir-volume\",\n \"empty_dir\": {\n \"medium\": \"MEMORY\",\n \"size_limit\": \"256Mi\",\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n LaunchStage = \"BETA\",\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Name = \"hello-1\",\n Ports = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerPortsArgs\n {\n ContainerPort = 8080,\n },\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n DependsOns = new[]\n {\n \"hello-2\",\n },\n VolumeMounts = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"empty-dir-volume\",\n MountPath = \"/mnt\",\n },\n },\n },\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Name = \"hello-2\",\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n Envs = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvArgs\n {\n Name = \"PORT\",\n Value = \"8081\",\n },\n },\n StartupProbe = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerStartupProbeArgs\n {\n HttpGet = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerStartupProbeHttpGetArgs\n {\n Port = 8081,\n },\n },\n },\n },\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"empty-dir-volume\",\n EmptyDir = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeEmptyDirArgs\n {\n Medium = \"MEMORY\",\n SizeLimit = \"256Mi\",\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: 
pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tLaunchStage: pulumi.String(\"BETA\"),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tName: pulumi.String(\"hello-1\"),\n\t\t\t\t\t\tPorts: \u0026cloudrunv2.ServiceTemplateContainerPortsArgs{\n\t\t\t\t\t\t\tContainerPort: pulumi.Int(8080),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tDependsOns: pulumi.StringArray{\n\t\t\t\t\t\t\tpulumi.String(\"hello-2\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"empty-dir-volume\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/mnt\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tName: pulumi.String(\"hello-2\"),\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tEnvs: cloudrunv2.ServiceTemplateContainerEnvArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerEnvArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"PORT\"),\n\t\t\t\t\t\t\t\tValue: pulumi.String(\"8081\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartupProbe: \u0026cloudrunv2.ServiceTemplateContainerStartupProbeArgs{\n\t\t\t\t\t\t\tHttpGet: \u0026cloudrunv2.ServiceTemplateContainerStartupProbeHttpGetArgs{\n\t\t\t\t\t\t\t\tPort: pulumi.Int(8081),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"empty-dir-volume\"),\n\t\t\t\t\t\tEmptyDir: \u0026cloudrunv2.ServiceTemplateVolumeEmptyDirArgs{\n\t\t\t\t\t\t\tMedium: pulumi.String(\"MEMORY\"),\n\t\t\t\t\t\t\tSizeLimit: pulumi.String(\"256Mi\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .launchStage(\"BETA\")\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .containers( \n ServiceTemplateContainerArgs.builder()\n .name(\"hello-1\")\n .ports(ServiceTemplateContainerPortsArgs.builder()\n .containerPort(8080)\n .build())\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .dependsOns(\"hello-2\")\n .volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"empty-dir-volume\")\n .mountPath(\"/mnt\")\n .build())\n .build(),\n ServiceTemplateContainerArgs.builder()\n .name(\"hello-2\")\n 
.image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .envs(ServiceTemplateContainerEnvArgs.builder()\n .name(\"PORT\")\n .value(\"8081\")\n .build())\n .startupProbe(ServiceTemplateContainerStartupProbeArgs.builder()\n .httpGet(ServiceTemplateContainerStartupProbeHttpGetArgs.builder()\n .port(8081)\n .build())\n .build())\n .build())\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"empty-dir-volume\")\n .emptyDir(ServiceTemplateVolumeEmptyDirArgs.builder()\n .medium(\"MEMORY\")\n .sizeLimit(\"256Mi\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n launchStage: BETA\n ingress: INGRESS_TRAFFIC_ALL\n template:\n containers:\n - name: hello-1\n ports:\n containerPort: 8080\n image: us-docker.pkg.dev/cloudrun/container/hello\n dependsOns:\n - hello-2\n volumeMounts:\n - name: empty-dir-volume\n mountPath: /mnt\n - name: hello-2\n image: us-docker.pkg.dev/cloudrun/container/hello\n envs:\n - name: PORT\n value: '8081'\n startupProbe:\n httpGet:\n port: 8081\n volumes:\n - name: empty-dir-volume\n emptyDir:\n medium: MEMORY\n sizeLimit: 256Mi\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Mount Gcs\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst defaultBucket = new gcp.storage.Bucket(\"default\", {\n name: \"cloudrun-service\",\n location: \"US\",\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n launchStage: \"BETA\",\n template: {\n executionEnvironment: \"EXECUTION_ENVIRONMENT_GEN2\",\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n volumeMounts: [{\n name: \"bucket\",\n mountPath: \"/var/www\",\n }],\n }],\n volumes: [{\n name: \"bucket\",\n gcs: {\n bucket: defaultBucket.name,\n readOnly: false,\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault_bucket = gcp.storage.Bucket(\"default\",\n name=\"cloudrun-service\",\n location=\"US\")\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n launch_stage=\"BETA\",\n template={\n \"execution_environment\": \"EXECUTION_ENVIRONMENT_GEN2\",\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"volume_mounts\": [{\n \"name\": \"bucket\",\n \"mount_path\": \"/var/www\",\n }],\n }],\n \"volumes\": [{\n \"name\": \"bucket\",\n \"gcs\": {\n \"bucket\": default_bucket.name,\n \"read_only\": False,\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var defaultBucket = new Gcp.Storage.Bucket(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"US\",\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n LaunchStage = \"BETA\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n ExecutionEnvironment = \"EXECUTION_ENVIRONMENT_GEN2\",\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n VolumeMounts = new[]\n {\n new 
Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"bucket\",\n MountPath = \"/var/www\",\n },\n },\n },\n },\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"bucket\",\n Gcs = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeGcsArgs\n {\n Bucket = defaultBucket.Name,\n ReadOnly = false,\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdefaultBucket, err := storage.NewBucket(ctx, \"default\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tLaunchStage: pulumi.String(\"BETA\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tExecutionEnvironment: pulumi.String(\"EXECUTION_ENVIRONMENT_GEN2\"),\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"bucket\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/var/www\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"bucket\"),\n\t\t\t\t\t\tGcs: \u0026cloudrunv2.ServiceTemplateVolumeGcsArgs{\n\t\t\t\t\t\t\tBucket: defaultBucket.Name,\n\t\t\t\t\t\t\tReadOnly: pulumi.Bool(false),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var defaultBucket = new Bucket(\"defaultBucket\", BucketArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"US\")\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .launchStage(\"BETA\")\n .template(ServiceTemplateArgs.builder()\n .executionEnvironment(\"EXECUTION_ENVIRONMENT_GEN2\")\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"bucket\")\n 
.mountPath(\"/var/www\")\n .build())\n .build())\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"bucket\")\n .gcs(ServiceTemplateVolumeGcsArgs.builder()\n .bucket(defaultBucket.name())\n .readOnly(false)\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n launchStage: BETA\n template:\n executionEnvironment: EXECUTION_ENVIRONMENT_GEN2\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n volumeMounts:\n - name: bucket\n mountPath: /var/www\n volumes:\n - name: bucket\n gcs:\n bucket: ${defaultBucket.name}\n readOnly: false\n defaultBucket:\n type: gcp:storage:Bucket\n name: default\n properties:\n name: cloudrun-service\n location: US\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Mount Nfs\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst defaultInstance = new gcp.filestore.Instance(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1-b\",\n tier: \"BASIC_HDD\",\n fileShares: {\n capacityGb: 1024,\n name: \"share1\",\n },\n networks: [{\n network: \"default\",\n modes: [\"MODE_IPV4\"],\n }],\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n launchStage: \"BETA\",\n template: {\n executionEnvironment: \"EXECUTION_ENVIRONMENT_GEN2\",\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello:latest\",\n volumeMounts: [{\n name: \"nfs\",\n mountPath: \"/mnt/nfs/filestore\",\n }],\n }],\n vpcAccess: {\n networkInterfaces: [{\n network: \"default\",\n subnetwork: \"default\",\n }],\n },\n volumes: [{\n name: \"nfs\",\n nfs: {\n server: defaultInstance.networks.apply(networks =\u003e networks[0].ipAddresses?.[0]),\n path: \"/share1\",\n readOnly: false,\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault_instance = gcp.filestore.Instance(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1-b\",\n tier=\"BASIC_HDD\",\n file_shares={\n \"capacity_gb\": 1024,\n \"name\": \"share1\",\n },\n networks=[{\n \"network\": \"default\",\n \"modes\": [\"MODE_IPV4\"],\n }])\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n ingress=\"INGRESS_TRAFFIC_ALL\",\n launch_stage=\"BETA\",\n template={\n \"execution_environment\": \"EXECUTION_ENVIRONMENT_GEN2\",\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello:latest\",\n \"volume_mounts\": [{\n \"name\": \"nfs\",\n \"mount_path\": \"/mnt/nfs/filestore\",\n }],\n }],\n \"vpc_access\": {\n \"network_interfaces\": [{\n \"network\": \"default\",\n \"subnetwork\": \"default\",\n }],\n },\n \"volumes\": [{\n \"name\": \"nfs\",\n \"nfs\": {\n \"server\": default_instance.networks[0].ip_addresses[0],\n \"path\": \"/share1\",\n \"read_only\": False,\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var defaultInstance = new Gcp.Filestore.Instance(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1-b\",\n Tier = \"BASIC_HDD\",\n FileShares = new 
Gcp.Filestore.Inputs.InstanceFileSharesArgs\n {\n CapacityGb = 1024,\n Name = \"share1\",\n },\n Networks = new[]\n {\n new Gcp.Filestore.Inputs.InstanceNetworkArgs\n {\n Network = \"default\",\n Modes = new[]\n {\n \"MODE_IPV4\",\n },\n },\n },\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n LaunchStage = \"BETA\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n ExecutionEnvironment = \"EXECUTION_ENVIRONMENT_GEN2\",\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello:latest\",\n VolumeMounts = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"nfs\",\n MountPath = \"/mnt/nfs/filestore\",\n },\n },\n },\n },\n VpcAccess = new Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessArgs\n {\n NetworkInterfaces = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessNetworkInterfaceArgs\n {\n Network = \"default\",\n Subnetwork = \"default\",\n },\n },\n },\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"nfs\",\n Nfs = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeNfsArgs\n {\n Server = defaultInstance.Networks.Apply(networks =\u003e networks[0].IpAddresses[0]),\n Path = \"/share1\",\n ReadOnly = false,\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/filestore\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdefaultInstance, err := filestore.NewInstance(ctx, \"default\", \u0026filestore.InstanceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1-b\"),\n\t\t\tTier: pulumi.String(\"BASIC_HDD\"),\n\t\t\tFileShares: \u0026filestore.InstanceFileSharesArgs{\n\t\t\t\tCapacityGb: pulumi.Int(1024),\n\t\t\t\tName: pulumi.String(\"share1\"),\n\t\t\t},\n\t\t\tNetworks: filestore.InstanceNetworkArray{\n\t\t\t\t\u0026filestore.InstanceNetworkArgs{\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t\tModes: pulumi.StringArray{\n\t\t\t\t\t\tpulumi.String(\"MODE_IPV4\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tLaunchStage: pulumi.String(\"BETA\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tExecutionEnvironment: pulumi.String(\"EXECUTION_ENVIRONMENT_GEN2\"),\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello:latest\"),\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"nfs\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/mnt/nfs/filestore\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVpcAccess: 
\u0026cloudrunv2.ServiceTemplateVpcAccessArgs{\n\t\t\t\t\tNetworkInterfaces: cloudrunv2.ServiceTemplateVpcAccessNetworkInterfaceArray{\n\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVpcAccessNetworkInterfaceArgs{\n\t\t\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t\t\t\tSubnetwork: pulumi.String(\"default\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"nfs\"),\n\t\t\t\t\t\tNfs: \u0026cloudrunv2.ServiceTemplateVolumeNfsArgs{\n\t\t\t\t\t\t\tServer: defaultInstance.Networks.ApplyT(func(networks []filestore.InstanceNetwork) (*string, error) {\n\t\t\t\t\t\t\t\treturn \u0026networks[0].IpAddresses[0], nil\n\t\t\t\t\t\t\t}).(pulumi.StringPtrOutput),\n\t\t\t\t\t\t\tPath: pulumi.String(\"/share1\"),\n\t\t\t\t\t\t\tReadOnly: pulumi.Bool(false),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.filestore.Instance;\nimport com.pulumi.gcp.filestore.InstanceArgs;\nimport com.pulumi.gcp.filestore.inputs.InstanceFileSharesArgs;\nimport com.pulumi.gcp.filestore.inputs.InstanceNetworkArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateVpcAccessArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var defaultInstance = new Instance(\"defaultInstance\", InstanceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1-b\")\n .tier(\"BASIC_HDD\")\n .fileShares(InstanceFileSharesArgs.builder()\n .capacityGb(1024)\n .name(\"share1\")\n .build())\n .networks(InstanceNetworkArgs.builder()\n .network(\"default\")\n .modes(\"MODE_IPV4\")\n .build())\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .launchStage(\"BETA\")\n .template(ServiceTemplateArgs.builder()\n .executionEnvironment(\"EXECUTION_ENVIRONMENT_GEN2\")\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello:latest\")\n .volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"nfs\")\n .mountPath(\"/mnt/nfs/filestore\")\n .build())\n .build())\n .vpcAccess(ServiceTemplateVpcAccessArgs.builder()\n .networkInterfaces(ServiceTemplateVpcAccessNetworkInterfaceArgs.builder()\n .network(\"default\")\n .subnetwork(\"default\")\n .build())\n .build())\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"nfs\")\n .nfs(ServiceTemplateVolumeNfsArgs.builder()\n .server(defaultInstance.networks().applyValue(networks -\u003e networks[0].ipAddresses()[0]))\n .path(\"/share1\")\n .readOnly(false)\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n 
launchStage: BETA\n template:\n executionEnvironment: EXECUTION_ENVIRONMENT_GEN2\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello:latest\n volumeMounts:\n - name: nfs\n mountPath: /mnt/nfs/filestore\n vpcAccess:\n networkInterfaces:\n - network: default\n subnetwork: default\n volumes:\n - name: nfs\n nfs:\n server: ${defaultInstance.networks[0].ipAddresses[0]}\n path: /share1\n readOnly: false\n defaultInstance:\n type: gcp:filestore:Instance\n name: default\n properties:\n name: cloudrun-service\n location: us-central1-b\n tier: BASIC_HDD\n fileShares:\n capacityGb: 1024\n name: share1\n networks:\n - network: default\n modes:\n - MODE_IPV4\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nService can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/services/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, Service can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:cloudrunv2/service:Service default projects/{{project}}/locations/{{location}}/services/{{name}}\n```\n\n```sh\n$ pulumi import gcp:cloudrunv2/service:Service default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:cloudrunv2/service:Service default {{location}}/{{name}}\n```\n\n", + "description": "Service acts as a top-level container that manages a set of configurations and revision templates which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership.\n\n\nTo get more information about Service, see:\n\n* [API documentation](https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.services)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/run/docs/)\n\n## Example Usage\n\n### Cloudrunv2 Service Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() 
{\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Limits\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n resources: {\n limits: {\n cpu: \"2\",\n memory: \"1024Mi\",\n },\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"resources\": {\n \"limits\": {\n \"cpu\": \"2\",\n \"memory\": \"1024Mi\",\n },\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n Resources = new 
Gcp.CloudRunV2.Inputs.ServiceTemplateContainerResourcesArgs\n {\n Limits = \n {\n { \"cpu\", \"2\" },\n { \"memory\", \"1024Mi\" },\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tResources: \u0026cloudrunv2.ServiceTemplateContainerResourcesArgs{\n\t\t\t\t\t\t\tLimits: pulumi.StringMap{\n\t\t\t\t\t\t\t\t\"cpu\": pulumi.String(\"2\"),\n\t\t\t\t\t\t\t\t\"memory\": pulumi.String(\"1024Mi\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .resources(ServiceTemplateContainerResourcesArgs.builder()\n .limits(Map.ofEntries(\n Map.entry(\"cpu\", \"2\"),\n Map.entry(\"memory\", \"1024Mi\")\n ))\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n resources:\n limits:\n cpu: '2'\n memory: 1024Mi\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Sql\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst secret = new gcp.secretmanager.Secret(\"secret\", {\n secretId: \"secret-1\",\n replication: {\n auto: {},\n },\n});\nconst secret_version_data = new gcp.secretmanager.SecretVersion(\"secret-version-data\", {\n secret: secret.name,\n secretData: \"secret-data\",\n});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"cloudrun-sql\",\n region: \"us-central1\",\n databaseVersion: \"MYSQL_5_7\",\n settings: {\n tier: \"db-f1-micro\",\n },\n deletionProtection: true,\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: 
\"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n scaling: {\n maxInstanceCount: 2,\n },\n volumes: [{\n name: \"cloudsql\",\n cloudSqlInstance: {\n instances: [instance.connectionName],\n },\n }],\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n envs: [\n {\n name: \"FOO\",\n value: \"bar\",\n },\n {\n name: \"SECRET_ENV_VAR\",\n valueSource: {\n secretKeyRef: {\n secret: secret.secretId,\n version: \"1\",\n },\n },\n },\n ],\n volumeMounts: [{\n name: \"cloudsql\",\n mountPath: \"/cloudsql\",\n }],\n }],\n },\n traffics: [{\n type: \"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\",\n percent: 100,\n }],\n}, {\n dependsOn: [secret_version_data],\n});\nconst project = gcp.organizations.getProject({});\nconst secret_access = new gcp.secretmanager.SecretIamMember(\"secret-access\", {\n secretId: secret.id,\n role: \"roles/secretmanager.secretAccessor\",\n member: project.then(project =\u003e `serviceAccount:${project.number}-compute@developer.gserviceaccount.com`),\n}, {\n dependsOn: [secret],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nsecret = gcp.secretmanager.Secret(\"secret\",\n secret_id=\"secret-1\",\n replication={\n \"auto\": {},\n })\nsecret_version_data = gcp.secretmanager.SecretVersion(\"secret-version-data\",\n secret=secret.name,\n secret_data=\"secret-data\")\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"cloudrun-sql\",\n region=\"us-central1\",\n database_version=\"MYSQL_5_7\",\n settings={\n \"tier\": \"db-f1-micro\",\n },\n deletion_protection=True)\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"scaling\": {\n \"max_instance_count\": 2,\n },\n \"volumes\": [{\n \"name\": \"cloudsql\",\n \"cloud_sql_instance\": {\n \"instances\": [instance.connection_name],\n },\n }],\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"envs\": [\n {\n \"name\": \"FOO\",\n \"value\": \"bar\",\n },\n {\n \"name\": \"SECRET_ENV_VAR\",\n \"value_source\": {\n \"secret_key_ref\": {\n \"secret\": secret.secret_id,\n \"version\": \"1\",\n },\n },\n },\n ],\n \"volume_mounts\": [{\n \"name\": \"cloudsql\",\n \"mount_path\": \"/cloudsql\",\n }],\n }],\n },\n traffics=[{\n \"type\": \"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\",\n \"percent\": 100,\n }],\n opts = pulumi.ResourceOptions(depends_on=[secret_version_data]))\nproject = gcp.organizations.get_project()\nsecret_access = gcp.secretmanager.SecretIamMember(\"secret-access\",\n secret_id=secret.id,\n role=\"roles/secretmanager.secretAccessor\",\n member=f\"serviceAccount:{project.number}-compute@developer.gserviceaccount.com\",\n opts = pulumi.ResourceOptions(depends_on=[secret]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var secret = new Gcp.SecretManager.Secret(\"secret\", new()\n {\n SecretId = \"secret-1\",\n Replication = new Gcp.SecretManager.Inputs.SecretReplicationArgs\n {\n Auto = null,\n },\n });\n\n var secret_version_data = new Gcp.SecretManager.SecretVersion(\"secret-version-data\", new()\n {\n Secret = secret.Name,\n SecretData = \"secret-data\",\n });\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"cloudrun-sql\",\n Region = \"us-central1\",\n DatabaseVersion = \"MYSQL_5_7\",\n 
Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n },\n DeletionProtection = true,\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Scaling = new Gcp.CloudRunV2.Inputs.ServiceTemplateScalingArgs\n {\n MaxInstanceCount = 2,\n },\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"cloudsql\",\n CloudSqlInstance = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeCloudSqlInstanceArgs\n {\n Instances = new[]\n {\n instance.ConnectionName,\n },\n },\n },\n },\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n Envs = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvArgs\n {\n Name = \"FOO\",\n Value = \"bar\",\n },\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvArgs\n {\n Name = \"SECRET_ENV_VAR\",\n ValueSource = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvValueSourceArgs\n {\n SecretKeyRef = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvValueSourceSecretKeyRefArgs\n {\n Secret = secret.SecretId,\n Version = \"1\",\n },\n },\n },\n },\n VolumeMounts = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"cloudsql\",\n MountPath = \"/cloudsql\",\n },\n },\n },\n },\n },\n Traffics = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTrafficArgs\n {\n Type = \"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\",\n Percent = 100,\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n secret_version_data,\n },\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var secret_access = new Gcp.SecretManager.SecretIamMember(\"secret-access\", new()\n {\n SecretId = secret.Id,\n Role = \"roles/secretmanager.secretAccessor\",\n Member = $\"serviceAccount:{project.Apply(getProjectResult =\u003e getProjectResult.Number)}-compute@developer.gserviceaccount.com\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n secret,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/secretmanager\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsecret, err := secretmanager.NewSecret(ctx, \"secret\", \u0026secretmanager.SecretArgs{\n\t\t\tSecretId: pulumi.String(\"secret-1\"),\n\t\t\tReplication: \u0026secretmanager.SecretReplicationArgs{\n\t\t\t\tAuto: nil,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = secretmanager.NewSecretVersion(ctx, \"secret-version-data\", \u0026secretmanager.SecretVersionArgs{\n\t\t\tSecret: secret.Name,\n\t\t\tSecretData: pulumi.String(\"secret-data\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-sql\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_5_7\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: 
pulumi.String(\"db-f1-micro\"),\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tScaling: \u0026cloudrunv2.ServiceTemplateScalingArgs{\n\t\t\t\t\tMaxInstanceCount: pulumi.Int(2),\n\t\t\t\t},\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"cloudsql\"),\n\t\t\t\t\t\tCloudSqlInstance: \u0026cloudrunv2.ServiceTemplateVolumeCloudSqlInstanceArgs{\n\t\t\t\t\t\t\tInstances: pulumi.StringArray{\n\t\t\t\t\t\t\t\tinstance.ConnectionName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tEnvs: cloudrunv2.ServiceTemplateContainerEnvArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerEnvArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"FOO\"),\n\t\t\t\t\t\t\t\tValue: pulumi.String(\"bar\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerEnvArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"SECRET_ENV_VAR\"),\n\t\t\t\t\t\t\t\tValueSource: \u0026cloudrunv2.ServiceTemplateContainerEnvValueSourceArgs{\n\t\t\t\t\t\t\t\t\tSecretKeyRef: \u0026cloudrunv2.ServiceTemplateContainerEnvValueSourceSecretKeyRefArgs{\n\t\t\t\t\t\t\t\t\t\tSecret: secret.SecretId,\n\t\t\t\t\t\t\t\t\t\tVersion: pulumi.String(\"1\"),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"cloudsql\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/cloudsql\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tTraffics: cloudrunv2.ServiceTrafficArray{\n\t\t\t\t\u0026cloudrunv2.ServiceTrafficArgs{\n\t\t\t\t\tType: pulumi.String(\"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\"),\n\t\t\t\t\tPercent: pulumi.Int(100),\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsecret_version_data,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = secretmanager.NewSecretIamMember(ctx, \"secret-access\", \u0026secretmanager.SecretIamMemberArgs{\n\t\t\tSecretId: secret.ID(),\n\t\t\tRole: pulumi.String(\"roles/secretmanager.secretAccessor\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:%v-compute@developer.gserviceaccount.com\", project.Number),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsecret,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.secretmanager.Secret;\nimport com.pulumi.gcp.secretmanager.SecretArgs;\nimport com.pulumi.gcp.secretmanager.inputs.SecretReplicationArgs;\nimport 
com.pulumi.gcp.secretmanager.inputs.SecretReplicationAutoArgs;\nimport com.pulumi.gcp.secretmanager.SecretVersion;\nimport com.pulumi.gcp.secretmanager.SecretVersionArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateScalingArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTrafficArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.secretmanager.SecretIamMember;\nimport com.pulumi.gcp.secretmanager.SecretIamMemberArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var secret = new Secret(\"secret\", SecretArgs.builder()\n .secretId(\"secret-1\")\n .replication(SecretReplicationArgs.builder()\n .auto()\n .build())\n .build());\n\n var secret_version_data = new SecretVersion(\"secret-version-data\", SecretVersionArgs.builder()\n .secret(secret.name())\n .secretData(\"secret-data\")\n .build());\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"cloudrun-sql\")\n .region(\"us-central1\")\n .databaseVersion(\"MYSQL_5_7\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .build())\n .deletionProtection(\"true\")\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .scaling(ServiceTemplateScalingArgs.builder()\n .maxInstanceCount(2)\n .build())\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"cloudsql\")\n .cloudSqlInstance(ServiceTemplateVolumeCloudSqlInstanceArgs.builder()\n .instances(instance.connectionName())\n .build())\n .build())\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .envs( \n ServiceTemplateContainerEnvArgs.builder()\n .name(\"FOO\")\n .value(\"bar\")\n .build(),\n ServiceTemplateContainerEnvArgs.builder()\n .name(\"SECRET_ENV_VAR\")\n .valueSource(ServiceTemplateContainerEnvValueSourceArgs.builder()\n .secretKeyRef(ServiceTemplateContainerEnvValueSourceSecretKeyRefArgs.builder()\n .secret(secret.secretId())\n .version(\"1\")\n .build())\n .build())\n .build())\n .volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"cloudsql\")\n .mountPath(\"/cloudsql\")\n .build())\n .build())\n .build())\n .traffics(ServiceTrafficArgs.builder()\n .type(\"TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\")\n .percent(100)\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(secret_version_data)\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var secret_access = new SecretIamMember(\"secret-access\", SecretIamMemberArgs.builder()\n .secretId(secret.id())\n .role(\"roles/secretmanager.secretAccessor\")\n .member(String.format(\"serviceAccount:%s-compute@developer.gserviceaccount.com\", 
project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build(), CustomResourceOptions.builder()\n .dependsOn(secret)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n template:\n scaling:\n maxInstanceCount: 2\n volumes:\n - name: cloudsql\n cloudSqlInstance:\n instances:\n - ${instance.connectionName}\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n envs:\n - name: FOO\n value: bar\n - name: SECRET_ENV_VAR\n valueSource:\n secretKeyRef:\n secret: ${secret.secretId}\n version: '1'\n volumeMounts:\n - name: cloudsql\n mountPath: /cloudsql\n traffics:\n - type: TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST\n percent: 100\n options:\n dependson:\n - ${[\"secret-version-data\"]}\n secret:\n type: gcp:secretmanager:Secret\n properties:\n secretId: secret-1\n replication:\n auto: {}\n secret-version-data:\n type: gcp:secretmanager:SecretVersion\n properties:\n secret: ${secret.name}\n secretData: secret-data\n secret-access:\n type: gcp:secretmanager:SecretIamMember\n properties:\n secretId: ${secret.id}\n role: roles/secretmanager.secretAccessor\n member: serviceAccount:${project.number}-compute@developer.gserviceaccount.com\n options:\n dependson:\n - ${secret}\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: cloudrun-sql\n region: us-central1\n databaseVersion: MYSQL_5_7\n settings:\n tier: db-f1-micro\n deletionProtection: 'true'\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Vpcaccess\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst customTestNetwork = new gcp.compute.Network(\"custom_test\", {\n name: \"run-network\",\n autoCreateSubnetworks: false,\n});\nconst customTest = new gcp.compute.Subnetwork(\"custom_test\", {\n name: \"run-subnetwork\",\n ipCidrRange: \"10.2.0.0/28\",\n region: \"us-central1\",\n network: customTestNetwork.id,\n});\nconst connector = new gcp.vpcaccess.Connector(\"connector\", {\n name: \"run-vpc\",\n subnet: {\n name: customTest.name,\n },\n machineType: \"e2-standard-4\",\n minInstances: 2,\n maxInstances: 3,\n region: \"us-central1\",\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n vpcAccess: {\n connector: connector.id,\n egress: \"ALL_TRAFFIC\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncustom_test_network = gcp.compute.Network(\"custom_test\",\n name=\"run-network\",\n auto_create_subnetworks=False)\ncustom_test = gcp.compute.Subnetwork(\"custom_test\",\n name=\"run-subnetwork\",\n ip_cidr_range=\"10.2.0.0/28\",\n region=\"us-central1\",\n network=custom_test_network.id)\nconnector = gcp.vpcaccess.Connector(\"connector\",\n name=\"run-vpc\",\n subnet={\n \"name\": custom_test.name,\n },\n machine_type=\"e2-standard-4\",\n min_instances=2,\n max_instances=3,\n region=\"us-central1\")\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n template={\n \"containers\": [{\n \"image\": 
\"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n \"vpc_access\": {\n \"connector\": connector.id,\n \"egress\": \"ALL_TRAFFIC\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var customTestNetwork = new Gcp.Compute.Network(\"custom_test\", new()\n {\n Name = \"run-network\",\n AutoCreateSubnetworks = false,\n });\n\n var customTest = new Gcp.Compute.Subnetwork(\"custom_test\", new()\n {\n Name = \"run-subnetwork\",\n IpCidrRange = \"10.2.0.0/28\",\n Region = \"us-central1\",\n Network = customTestNetwork.Id,\n });\n\n var connector = new Gcp.VpcAccess.Connector(\"connector\", new()\n {\n Name = \"run-vpc\",\n Subnet = new Gcp.VpcAccess.Inputs.ConnectorSubnetArgs\n {\n Name = customTest.Name,\n },\n MachineType = \"e2-standard-4\",\n MinInstances = 2,\n MaxInstances = 3,\n Region = \"us-central1\",\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n },\n },\n VpcAccess = new Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessArgs\n {\n Connector = connector.Id,\n Egress = \"ALL_TRAFFIC\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/vpcaccess\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcustomTestNetwork, err := compute.NewNetwork(ctx, \"custom_test\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"run-network\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcustomTest, err := compute.NewSubnetwork(ctx, \"custom_test\", \u0026compute.SubnetworkArgs{\n\t\t\tName: pulumi.String(\"run-subnetwork\"),\n\t\t\tIpCidrRange: pulumi.String(\"10.2.0.0/28\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNetwork: customTestNetwork.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconnector, err := vpcaccess.NewConnector(ctx, \"connector\", \u0026vpcaccess.ConnectorArgs{\n\t\t\tName: pulumi.String(\"run-vpc\"),\n\t\t\tSubnet: \u0026vpcaccess.ConnectorSubnetArgs{\n\t\t\t\tName: customTest.Name,\n\t\t\t},\n\t\t\tMachineType: pulumi.String(\"e2-standard-4\"),\n\t\t\tMinInstances: pulumi.Int(2),\n\t\t\tMaxInstances: pulumi.Int(3),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVpcAccess: \u0026cloudrunv2.ServiceTemplateVpcAccessArgs{\n\t\t\t\t\tConnector: connector.ID(),\n\t\t\t\t\tEgress: 
pulumi.String(\"ALL_TRAFFIC\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.Subnetwork;\nimport com.pulumi.gcp.compute.SubnetworkArgs;\nimport com.pulumi.gcp.vpcaccess.Connector;\nimport com.pulumi.gcp.vpcaccess.ConnectorArgs;\nimport com.pulumi.gcp.vpcaccess.inputs.ConnectorSubnetArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateVpcAccessArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var customTestNetwork = new Network(\"customTestNetwork\", NetworkArgs.builder()\n .name(\"run-network\")\n .autoCreateSubnetworks(false)\n .build());\n\n var customTest = new Subnetwork(\"customTest\", SubnetworkArgs.builder()\n .name(\"run-subnetwork\")\n .ipCidrRange(\"10.2.0.0/28\")\n .region(\"us-central1\")\n .network(customTestNetwork.id())\n .build());\n\n var connector = new Connector(\"connector\", ConnectorArgs.builder()\n .name(\"run-vpc\")\n .subnet(ConnectorSubnetArgs.builder()\n .name(customTest.name())\n .build())\n .machineType(\"e2-standard-4\")\n .minInstances(2)\n .maxInstances(3)\n .region(\"us-central1\")\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .build())\n .vpcAccess(ServiceTemplateVpcAccessArgs.builder()\n .connector(connector.id())\n .egress(\"ALL_TRAFFIC\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n vpcAccess:\n connector: ${connector.id}\n egress: ALL_TRAFFIC\n connector:\n type: gcp:vpcaccess:Connector\n properties:\n name: run-vpc\n subnet:\n name: ${customTest.name}\n machineType: e2-standard-4\n minInstances: 2\n maxInstances: 3\n region: us-central1\n customTest:\n type: gcp:compute:Subnetwork\n name: custom_test\n properties:\n name: run-subnetwork\n ipCidrRange: 10.2.0.0/28\n region: us-central1\n network: ${customTestNetwork.id}\n customTestNetwork:\n type: gcp:compute:Network\n name: custom_test\n properties:\n name: run-network\n autoCreateSubnetworks: false\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Directvpc\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n launchStage: \"GA\",\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n 
vpcAccess: {\n networkInterfaces: [{\n network: \"default\",\n subnetwork: \"default\",\n tags: [\n \"tag1\",\n \"tag2\",\n \"tag3\",\n ],\n }],\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n launch_stage=\"GA\",\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n \"vpc_access\": {\n \"network_interfaces\": [{\n \"network\": \"default\",\n \"subnetwork\": \"default\",\n \"tags\": [\n \"tag1\",\n \"tag2\",\n \"tag3\",\n ],\n }],\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n LaunchStage = \"GA\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n },\n },\n VpcAccess = new Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessArgs\n {\n NetworkInterfaces = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessNetworkInterfaceArgs\n {\n Network = \"default\",\n Subnetwork = \"default\",\n Tags = new[]\n {\n \"tag1\",\n \"tag2\",\n \"tag3\",\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tLaunchStage: pulumi.String(\"GA\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVpcAccess: \u0026cloudrunv2.ServiceTemplateVpcAccessArgs{\n\t\t\t\t\tNetworkInterfaces: cloudrunv2.ServiceTemplateVpcAccessNetworkInterfaceArray{\n\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVpcAccessNetworkInterfaceArgs{\n\t\t\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t\t\t\tSubnetwork: pulumi.String(\"default\"),\n\t\t\t\t\t\t\tTags: pulumi.StringArray{\n\t\t\t\t\t\t\t\tpulumi.String(\"tag1\"),\n\t\t\t\t\t\t\t\tpulumi.String(\"tag2\"),\n\t\t\t\t\t\t\t\tpulumi.String(\"tag3\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateVpcAccessArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n 
Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .launchStage(\"GA\")\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .build())\n .vpcAccess(ServiceTemplateVpcAccessArgs.builder()\n .networkInterfaces(ServiceTemplateVpcAccessNetworkInterfaceArgs.builder()\n .network(\"default\")\n .subnetwork(\"default\")\n .tags( \n \"tag1\",\n \"tag2\",\n \"tag3\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n launchStage: GA\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n vpcAccess:\n networkInterfaces:\n - network: default\n subnetwork: default\n tags:\n - tag1\n - tag2\n - tag3\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Probes\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n startupProbe: {\n initialDelaySeconds: 0,\n timeoutSeconds: 1,\n periodSeconds: 3,\n failureThreshold: 1,\n tcpSocket: {\n port: 8080,\n },\n },\n livenessProbe: {\n httpGet: {\n path: \"/\",\n },\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"startup_probe\": {\n \"initial_delay_seconds\": 0,\n \"timeout_seconds\": 1,\n \"period_seconds\": 3,\n \"failure_threshold\": 1,\n \"tcp_socket\": {\n \"port\": 8080,\n },\n },\n \"liveness_probe\": {\n \"http_get\": {\n \"path\": \"/\",\n },\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n StartupProbe = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerStartupProbeArgs\n {\n InitialDelaySeconds = 0,\n TimeoutSeconds = 1,\n PeriodSeconds = 3,\n FailureThreshold = 1,\n TcpSocket = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerStartupProbeTcpSocketArgs\n {\n Port = 8080,\n },\n },\n LivenessProbe = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerLivenessProbeArgs\n {\n HttpGet = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerLivenessProbeHttpGetArgs\n {\n Path = \"/\",\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx 
*pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tStartupProbe: \u0026cloudrunv2.ServiceTemplateContainerStartupProbeArgs{\n\t\t\t\t\t\t\tInitialDelaySeconds: pulumi.Int(0),\n\t\t\t\t\t\t\tTimeoutSeconds: pulumi.Int(1),\n\t\t\t\t\t\t\tPeriodSeconds: pulumi.Int(3),\n\t\t\t\t\t\t\tFailureThreshold: pulumi.Int(1),\n\t\t\t\t\t\t\tTcpSocket: \u0026cloudrunv2.ServiceTemplateContainerStartupProbeTcpSocketArgs{\n\t\t\t\t\t\t\t\tPort: pulumi.Int(8080),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tLivenessProbe: \u0026cloudrunv2.ServiceTemplateContainerLivenessProbeArgs{\n\t\t\t\t\t\t\tHttpGet: \u0026cloudrunv2.ServiceTemplateContainerLivenessProbeHttpGetArgs{\n\t\t\t\t\t\t\t\tPath: pulumi.String(\"/\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .startupProbe(ServiceTemplateContainerStartupProbeArgs.builder()\n .initialDelaySeconds(0)\n .timeoutSeconds(1)\n .periodSeconds(3)\n .failureThreshold(1)\n .tcpSocket(ServiceTemplateContainerStartupProbeTcpSocketArgs.builder()\n .port(8080)\n .build())\n .build())\n .livenessProbe(ServiceTemplateContainerLivenessProbeArgs.builder()\n .httpGet(ServiceTemplateContainerLivenessProbeHttpGetArgs.builder()\n .path(\"/\")\n .build())\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n startupProbe:\n initialDelaySeconds: 0\n timeoutSeconds: 1\n periodSeconds: 3\n failureThreshold: 1\n tcpSocket:\n port: 8080\n livenessProbe:\n httpGet:\n path: /\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Secret\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst secret = new gcp.secretmanager.Secret(\"secret\", {\n secretId: \"secret-1\",\n replication: {\n auto: {},\n },\n});\nconst secret_version_data = new gcp.secretmanager.SecretVersion(\"secret-version-data\", {\n 
secret: secret.name,\n secretData: \"secret-data\",\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n volumes: [{\n name: \"a-volume\",\n secret: {\n secret: secret.secretId,\n defaultMode: 292,\n items: [{\n version: \"1\",\n path: \"my-secret\",\n }],\n },\n }],\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n volumeMounts: [{\n name: \"a-volume\",\n mountPath: \"/secrets\",\n }],\n }],\n },\n}, {\n dependsOn: [secret_version_data],\n});\nconst project = gcp.organizations.getProject({});\nconst secret_access = new gcp.secretmanager.SecretIamMember(\"secret-access\", {\n secretId: secret.id,\n role: \"roles/secretmanager.secretAccessor\",\n member: project.then(project =\u003e `serviceAccount:${project.number}-compute@developer.gserviceaccount.com`),\n}, {\n dependsOn: [secret],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nsecret = gcp.secretmanager.Secret(\"secret\",\n secret_id=\"secret-1\",\n replication={\n \"auto\": {},\n })\nsecret_version_data = gcp.secretmanager.SecretVersion(\"secret-version-data\",\n secret=secret.name,\n secret_data=\"secret-data\")\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"volumes\": [{\n \"name\": \"a-volume\",\n \"secret\": {\n \"secret\": secret.secret_id,\n \"default_mode\": 292,\n \"items\": [{\n \"version\": \"1\",\n \"path\": \"my-secret\",\n }],\n },\n }],\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"volume_mounts\": [{\n \"name\": \"a-volume\",\n \"mount_path\": \"/secrets\",\n }],\n }],\n },\n opts = pulumi.ResourceOptions(depends_on=[secret_version_data]))\nproject = gcp.organizations.get_project()\nsecret_access = gcp.secretmanager.SecretIamMember(\"secret-access\",\n secret_id=secret.id,\n role=\"roles/secretmanager.secretAccessor\",\n member=f\"serviceAccount:{project.number}-compute@developer.gserviceaccount.com\",\n opts = pulumi.ResourceOptions(depends_on=[secret]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var secret = new Gcp.SecretManager.Secret(\"secret\", new()\n {\n SecretId = \"secret-1\",\n Replication = new Gcp.SecretManager.Inputs.SecretReplicationArgs\n {\n Auto = null,\n },\n });\n\n var secret_version_data = new Gcp.SecretManager.SecretVersion(\"secret-version-data\", new()\n {\n Secret = secret.Name,\n SecretData = \"secret-data\",\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"a-volume\",\n Secret = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeSecretArgs\n {\n Secret = secret.SecretId,\n DefaultMode = 292,\n Items = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeSecretItemArgs\n {\n Version = \"1\",\n Path = \"my-secret\",\n },\n },\n },\n },\n },\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n VolumeMounts = new[]\n 
{\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"a-volume\",\n MountPath = \"/secrets\",\n },\n },\n },\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n secret_version_data,\n },\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var secret_access = new Gcp.SecretManager.SecretIamMember(\"secret-access\", new()\n {\n SecretId = secret.Id,\n Role = \"roles/secretmanager.secretAccessor\",\n Member = $\"serviceAccount:{project.Apply(getProjectResult =\u003e getProjectResult.Number)}-compute@developer.gserviceaccount.com\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n secret,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/secretmanager\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsecret, err := secretmanager.NewSecret(ctx, \"secret\", \u0026secretmanager.SecretArgs{\n\t\t\tSecretId: pulumi.String(\"secret-1\"),\n\t\t\tReplication: \u0026secretmanager.SecretReplicationArgs{\n\t\t\t\tAuto: nil,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = secretmanager.NewSecretVersion(ctx, \"secret-version-data\", \u0026secretmanager.SecretVersionArgs{\n\t\t\tSecret: secret.Name,\n\t\t\tSecretData: pulumi.String(\"secret-data\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"a-volume\"),\n\t\t\t\t\t\tSecret: \u0026cloudrunv2.ServiceTemplateVolumeSecretArgs{\n\t\t\t\t\t\t\tSecret: secret.SecretId,\n\t\t\t\t\t\t\tDefaultMode: pulumi.Int(292),\n\t\t\t\t\t\t\tItems: cloudrunv2.ServiceTemplateVolumeSecretItemArray{\n\t\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeSecretItemArgs{\n\t\t\t\t\t\t\t\t\tVersion: pulumi.String(\"1\"),\n\t\t\t\t\t\t\t\t\tPath: pulumi.String(\"my-secret\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"a-volume\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/secrets\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsecret_version_data,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = secretmanager.NewSecretIamMember(ctx, \"secret-access\", \u0026secretmanager.SecretIamMemberArgs{\n\t\t\tSecretId: secret.ID(),\n\t\t\tRole: pulumi.String(\"roles/secretmanager.secretAccessor\"),\n\t\t\tMember: 
pulumi.Sprintf(\"serviceAccount:%v-compute@developer.gserviceaccount.com\", project.Number),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsecret,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.secretmanager.Secret;\nimport com.pulumi.gcp.secretmanager.SecretArgs;\nimport com.pulumi.gcp.secretmanager.inputs.SecretReplicationArgs;\nimport com.pulumi.gcp.secretmanager.inputs.SecretReplicationAutoArgs;\nimport com.pulumi.gcp.secretmanager.SecretVersion;\nimport com.pulumi.gcp.secretmanager.SecretVersionArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.secretmanager.SecretIamMember;\nimport com.pulumi.gcp.secretmanager.SecretIamMemberArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var secret = new Secret(\"secret\", SecretArgs.builder()\n .secretId(\"secret-1\")\n .replication(SecretReplicationArgs.builder()\n .auto()\n .build())\n .build());\n\n var secret_version_data = new SecretVersion(\"secret-version-data\", SecretVersionArgs.builder()\n .secret(secret.name())\n .secretData(\"secret-data\")\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"a-volume\")\n .secret(ServiceTemplateVolumeSecretArgs.builder()\n .secret(secret.secretId())\n .defaultMode(292)\n .items(ServiceTemplateVolumeSecretItemArgs.builder()\n .version(\"1\")\n .path(\"my-secret\")\n .build())\n .build())\n .build())\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"a-volume\")\n .mountPath(\"/secrets\")\n .build())\n .build())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(secret_version_data)\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var secret_access = new SecretIamMember(\"secret-access\", SecretIamMemberArgs.builder()\n .secretId(secret.id())\n .role(\"roles/secretmanager.secretAccessor\")\n .member(String.format(\"serviceAccount:%s-compute@developer.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build(), CustomResourceOptions.builder()\n .dependsOn(secret)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n template:\n volumes:\n - name: a-volume\n secret:\n secret: ${secret.secretId}\n defaultMode: 292\n items:\n - version: '1'\n path: my-secret\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n volumeMounts:\n - 
name: a-volume\n mountPath: /secrets\n options:\n dependson:\n - ${[\"secret-version-data\"]}\n secret:\n type: gcp:secretmanager:Secret\n properties:\n secretId: secret-1\n replication:\n auto: {}\n secret-version-data:\n type: gcp:secretmanager:SecretVersion\n properties:\n secret: ${secret.name}\n secretData: secret-data\n secret-access:\n type: gcp:secretmanager:SecretIamMember\n properties:\n secretId: ${secret.id}\n role: roles/secretmanager.secretAccessor\n member: serviceAccount:${project.number}-compute@developer.gserviceaccount.com\n options:\n dependson:\n - ${secret}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Multicontainer\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n launchStage: \"BETA\",\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n containers: [\n {\n name: \"hello-1\",\n ports: {\n containerPort: 8080,\n },\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n dependsOns: [\"hello-2\"],\n volumeMounts: [{\n name: \"empty-dir-volume\",\n mountPath: \"/mnt\",\n }],\n },\n {\n name: \"hello-2\",\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n envs: [{\n name: \"PORT\",\n value: \"8081\",\n }],\n startupProbe: {\n httpGet: {\n port: 8081,\n },\n },\n },\n ],\n volumes: [{\n name: \"empty-dir-volume\",\n emptyDir: {\n medium: \"MEMORY\",\n sizeLimit: \"256Mi\",\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n launch_stage=\"BETA\",\n ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"containers\": [\n {\n \"name\": \"hello-1\",\n \"ports\": {\n \"container_port\": 8080,\n },\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"depends_ons\": [\"hello-2\"],\n \"volume_mounts\": [{\n \"name\": \"empty-dir-volume\",\n \"mount_path\": \"/mnt\",\n }],\n },\n {\n \"name\": \"hello-2\",\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"envs\": [{\n \"name\": \"PORT\",\n \"value\": \"8081\",\n }],\n \"startup_probe\": {\n \"http_get\": {\n \"port\": 8081,\n },\n },\n },\n ],\n \"volumes\": [{\n \"name\": \"empty-dir-volume\",\n \"empty_dir\": {\n \"medium\": \"MEMORY\",\n \"size_limit\": \"256Mi\",\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n LaunchStage = \"BETA\",\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Name = \"hello-1\",\n Ports = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerPortsArgs\n {\n ContainerPort = 8080,\n },\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n DependsOns = new[]\n {\n \"hello-2\",\n },\n VolumeMounts = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"empty-dir-volume\",\n MountPath = 
\"/mnt\",\n },\n },\n },\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Name = \"hello-2\",\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n Envs = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerEnvArgs\n {\n Name = \"PORT\",\n Value = \"8081\",\n },\n },\n StartupProbe = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerStartupProbeArgs\n {\n HttpGet = new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerStartupProbeHttpGetArgs\n {\n Port = 8081,\n },\n },\n },\n },\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"empty-dir-volume\",\n EmptyDir = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeEmptyDirArgs\n {\n Medium = \"MEMORY\",\n SizeLimit = \"256Mi\",\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tLaunchStage: pulumi.String(\"BETA\"),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tName: pulumi.String(\"hello-1\"),\n\t\t\t\t\t\tPorts: \u0026cloudrunv2.ServiceTemplateContainerPortsArgs{\n\t\t\t\t\t\t\tContainerPort: pulumi.Int(8080),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tDependsOns: pulumi.StringArray{\n\t\t\t\t\t\t\tpulumi.String(\"hello-2\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"empty-dir-volume\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/mnt\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tName: pulumi.String(\"hello-2\"),\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tEnvs: cloudrunv2.ServiceTemplateContainerEnvArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerEnvArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"PORT\"),\n\t\t\t\t\t\t\t\tValue: pulumi.String(\"8081\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStartupProbe: \u0026cloudrunv2.ServiceTemplateContainerStartupProbeArgs{\n\t\t\t\t\t\t\tHttpGet: \u0026cloudrunv2.ServiceTemplateContainerStartupProbeHttpGetArgs{\n\t\t\t\t\t\t\t\tPort: pulumi.Int(8081),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"empty-dir-volume\"),\n\t\t\t\t\t\tEmptyDir: \u0026cloudrunv2.ServiceTemplateVolumeEmptyDirArgs{\n\t\t\t\t\t\t\tMedium: pulumi.String(\"MEMORY\"),\n\t\t\t\t\t\t\tSizeLimit: pulumi.String(\"256Mi\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport 
com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .launchStage(\"BETA\")\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .containers( \n ServiceTemplateContainerArgs.builder()\n .name(\"hello-1\")\n .ports(ServiceTemplateContainerPortsArgs.builder()\n .containerPort(8080)\n .build())\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .dependsOns(\"hello-2\")\n .volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"empty-dir-volume\")\n .mountPath(\"/mnt\")\n .build())\n .build(),\n ServiceTemplateContainerArgs.builder()\n .name(\"hello-2\")\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .envs(ServiceTemplateContainerEnvArgs.builder()\n .name(\"PORT\")\n .value(\"8081\")\n .build())\n .startupProbe(ServiceTemplateContainerStartupProbeArgs.builder()\n .httpGet(ServiceTemplateContainerStartupProbeHttpGetArgs.builder()\n .port(8081)\n .build())\n .build())\n .build())\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"empty-dir-volume\")\n .emptyDir(ServiceTemplateVolumeEmptyDirArgs.builder()\n .medium(\"MEMORY\")\n .sizeLimit(\"256Mi\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n launchStage: BETA\n ingress: INGRESS_TRAFFIC_ALL\n template:\n containers:\n - name: hello-1\n ports:\n containerPort: 8080\n image: us-docker.pkg.dev/cloudrun/container/hello\n dependsOns:\n - hello-2\n volumeMounts:\n - name: empty-dir-volume\n mountPath: /mnt\n - name: hello-2\n image: us-docker.pkg.dev/cloudrun/container/hello\n envs:\n - name: PORT\n value: '8081'\n startupProbe:\n httpGet:\n port: 8081\n volumes:\n - name: empty-dir-volume\n emptyDir:\n medium: MEMORY\n sizeLimit: 256Mi\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Mount Gcs\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst defaultBucket = new gcp.storage.Bucket(\"default\", {\n name: \"cloudrun-service\",\n location: \"US\",\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n template: {\n executionEnvironment: \"EXECUTION_ENVIRONMENT_GEN2\",\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n volumeMounts: [{\n name: \"bucket\",\n mountPath: \"/var/www\",\n }],\n }],\n volumes: [{\n name: \"bucket\",\n gcs: {\n bucket: defaultBucket.name,\n readOnly: false,\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault_bucket = gcp.storage.Bucket(\"default\",\n name=\"cloudrun-service\",\n location=\"US\")\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n template={\n \"execution_environment\": 
\"EXECUTION_ENVIRONMENT_GEN2\",\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n \"volume_mounts\": [{\n \"name\": \"bucket\",\n \"mount_path\": \"/var/www\",\n }],\n }],\n \"volumes\": [{\n \"name\": \"bucket\",\n \"gcs\": {\n \"bucket\": default_bucket.name,\n \"read_only\": False,\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var defaultBucket = new Gcp.Storage.Bucket(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"US\",\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n ExecutionEnvironment = \"EXECUTION_ENVIRONMENT_GEN2\",\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n VolumeMounts = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"bucket\",\n MountPath = \"/var/www\",\n },\n },\n },\n },\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"bucket\",\n Gcs = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeGcsArgs\n {\n Bucket = defaultBucket.Name,\n ReadOnly = false,\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdefaultBucket, err := storage.NewBucket(ctx, \"default\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tExecutionEnvironment: pulumi.String(\"EXECUTION_ENVIRONMENT_GEN2\"),\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"bucket\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/var/www\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"bucket\"),\n\t\t\t\t\t\tGcs: \u0026cloudrunv2.ServiceTemplateVolumeGcsArgs{\n\t\t\t\t\t\t\tBucket: defaultBucket.Name,\n\t\t\t\t\t\t\tReadOnly: pulumi.Bool(false),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport 
com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var defaultBucket = new Bucket(\"defaultBucket\", BucketArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"US\")\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .template(ServiceTemplateArgs.builder()\n .executionEnvironment(\"EXECUTION_ENVIRONMENT_GEN2\")\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"bucket\")\n .mountPath(\"/var/www\")\n .build())\n .build())\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"bucket\")\n .gcs(ServiceTemplateVolumeGcsArgs.builder()\n .bucket(defaultBucket.name())\n .readOnly(false)\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n template:\n executionEnvironment: EXECUTION_ENVIRONMENT_GEN2\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n volumeMounts:\n - name: bucket\n mountPath: /var/www\n volumes:\n - name: bucket\n gcs:\n bucket: ${defaultBucket.name}\n readOnly: false\n defaultBucket:\n type: gcp:storage:Bucket\n name: default\n properties:\n name: cloudrun-service\n location: US\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Mount Nfs\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst defaultInstance = new gcp.filestore.Instance(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1-b\",\n tier: \"BASIC_HDD\",\n fileShares: {\n capacityGb: 1024,\n name: \"share1\",\n },\n networks: [{\n network: \"default\",\n modes: [\"MODE_IPV4\"],\n }],\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n location: \"us-central1\",\n deletionProtection: false,\n ingress: \"INGRESS_TRAFFIC_ALL\",\n template: {\n executionEnvironment: \"EXECUTION_ENVIRONMENT_GEN2\",\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello:latest\",\n volumeMounts: [{\n name: \"nfs\",\n mountPath: \"/mnt/nfs/filestore\",\n }],\n }],\n vpcAccess: {\n networkInterfaces: [{\n network: \"default\",\n subnetwork: \"default\",\n }],\n },\n volumes: [{\n name: \"nfs\",\n nfs: {\n server: defaultInstance.networks.apply(networks =\u003e networks[0].ipAddresses?.[0]),\n path: \"/share1\",\n readOnly: false,\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault_instance = gcp.filestore.Instance(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1-b\",\n tier=\"BASIC_HDD\",\n file_shares={\n \"capacity_gb\": 1024,\n \"name\": \"share1\",\n },\n networks=[{\n \"network\": \"default\",\n \"modes\": [\"MODE_IPV4\"],\n }])\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n location=\"us-central1\",\n deletion_protection=False,\n 
ingress=\"INGRESS_TRAFFIC_ALL\",\n template={\n \"execution_environment\": \"EXECUTION_ENVIRONMENT_GEN2\",\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello:latest\",\n \"volume_mounts\": [{\n \"name\": \"nfs\",\n \"mount_path\": \"/mnt/nfs/filestore\",\n }],\n }],\n \"vpc_access\": {\n \"network_interfaces\": [{\n \"network\": \"default\",\n \"subnetwork\": \"default\",\n }],\n },\n \"volumes\": [{\n \"name\": \"nfs\",\n \"nfs\": {\n \"server\": default_instance.networks[0].ip_addresses[0],\n \"path\": \"/share1\",\n \"read_only\": False,\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var defaultInstance = new Gcp.Filestore.Instance(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1-b\",\n Tier = \"BASIC_HDD\",\n FileShares = new Gcp.Filestore.Inputs.InstanceFileSharesArgs\n {\n CapacityGb = 1024,\n Name = \"share1\",\n },\n Networks = new[]\n {\n new Gcp.Filestore.Inputs.InstanceNetworkArgs\n {\n Network = \"default\",\n Modes = new[]\n {\n \"MODE_IPV4\",\n },\n },\n },\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n Location = \"us-central1\",\n DeletionProtection = false,\n Ingress = \"INGRESS_TRAFFIC_ALL\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n ExecutionEnvironment = \"EXECUTION_ENVIRONMENT_GEN2\",\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello:latest\",\n VolumeMounts = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs\n {\n Name = \"nfs\",\n MountPath = \"/mnt/nfs/filestore\",\n },\n },\n },\n },\n VpcAccess = new Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessArgs\n {\n NetworkInterfaces = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVpcAccessNetworkInterfaceArgs\n {\n Network = \"default\",\n Subnetwork = \"default\",\n },\n },\n },\n Volumes = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs\n {\n Name = \"nfs\",\n Nfs = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeNfsArgs\n {\n Server = defaultInstance.Networks.Apply(networks =\u003e networks[0].IpAddresses[0]),\n Path = \"/share1\",\n ReadOnly = false,\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/filestore\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdefaultInstance, err := filestore.NewInstance(ctx, \"default\", \u0026filestore.InstanceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: pulumi.String(\"us-central1-b\"),\n\t\t\tTier: pulumi.String(\"BASIC_HDD\"),\n\t\t\tFileShares: \u0026filestore.InstanceFileSharesArgs{\n\t\t\t\tCapacityGb: pulumi.Int(1024),\n\t\t\t\tName: pulumi.String(\"share1\"),\n\t\t\t},\n\t\t\tNetworks: filestore.InstanceNetworkArray{\n\t\t\t\t\u0026filestore.InstanceNetworkArgs{\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t\tModes: pulumi.StringArray{\n\t\t\t\t\t\tpulumi.String(\"MODE_IPV4\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tLocation: 
pulumi.String(\"us-central1\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tIngress: pulumi.String(\"INGRESS_TRAFFIC_ALL\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tExecutionEnvironment: pulumi.String(\"EXECUTION_ENVIRONMENT_GEN2\"),\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello:latest\"),\n\t\t\t\t\t\tVolumeMounts: cloudrunv2.ServiceTemplateContainerVolumeMountArray{\n\t\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerVolumeMountArgs{\n\t\t\t\t\t\t\t\tName: pulumi.String(\"nfs\"),\n\t\t\t\t\t\t\t\tMountPath: pulumi.String(\"/mnt/nfs/filestore\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVpcAccess: \u0026cloudrunv2.ServiceTemplateVpcAccessArgs{\n\t\t\t\t\tNetworkInterfaces: cloudrunv2.ServiceTemplateVpcAccessNetworkInterfaceArray{\n\t\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVpcAccessNetworkInterfaceArgs{\n\t\t\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t\t\t\tSubnetwork: pulumi.String(\"default\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVolumes: cloudrunv2.ServiceTemplateVolumeArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateVolumeArgs{\n\t\t\t\t\t\tName: pulumi.String(\"nfs\"),\n\t\t\t\t\t\tNfs: \u0026cloudrunv2.ServiceTemplateVolumeNfsArgs{\n\t\t\t\t\t\t\tServer: defaultInstance.Networks.ApplyT(func(networks []filestore.InstanceNetwork) (*string, error) {\n\t\t\t\t\t\t\t\treturn \u0026networks[0].IpAddresses[0], nil\n\t\t\t\t\t\t\t}).(pulumi.StringPtrOutput),\n\t\t\t\t\t\t\tPath: pulumi.String(\"/share1\"),\n\t\t\t\t\t\t\tReadOnly: pulumi.Bool(false),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.filestore.Instance;\nimport com.pulumi.gcp.filestore.InstanceArgs;\nimport com.pulumi.gcp.filestore.inputs.InstanceFileSharesArgs;\nimport com.pulumi.gcp.filestore.inputs.InstanceNetworkArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateVpcAccessArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var defaultInstance = new Instance(\"defaultInstance\", InstanceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1-b\")\n .tier(\"BASIC_HDD\")\n .fileShares(InstanceFileSharesArgs.builder()\n .capacityGb(1024)\n .name(\"share1\")\n .build())\n .networks(InstanceNetworkArgs.builder()\n .network(\"default\")\n .modes(\"MODE_IPV4\")\n .build())\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .location(\"us-central1\")\n .deletionProtection(false)\n .ingress(\"INGRESS_TRAFFIC_ALL\")\n .template(ServiceTemplateArgs.builder()\n .executionEnvironment(\"EXECUTION_ENVIRONMENT_GEN2\")\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello:latest\")\n 
.volumeMounts(ServiceTemplateContainerVolumeMountArgs.builder()\n .name(\"nfs\")\n .mountPath(\"/mnt/nfs/filestore\")\n .build())\n .build())\n .vpcAccess(ServiceTemplateVpcAccessArgs.builder()\n .networkInterfaces(ServiceTemplateVpcAccessNetworkInterfaceArgs.builder()\n .network(\"default\")\n .subnetwork(\"default\")\n .build())\n .build())\n .volumes(ServiceTemplateVolumeArgs.builder()\n .name(\"nfs\")\n .nfs(ServiceTemplateVolumeNfsArgs.builder()\n .server(defaultInstance.networks().applyValue(networks -\u003e networks[0].ipAddresses()[0]))\n .path(\"/share1\")\n .readOnly(false)\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n location: us-central1\n deletionProtection: false\n ingress: INGRESS_TRAFFIC_ALL\n template:\n executionEnvironment: EXECUTION_ENVIRONMENT_GEN2\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello:latest\n volumeMounts:\n - name: nfs\n mountPath: /mnt/nfs/filestore\n vpcAccess:\n networkInterfaces:\n - network: default\n subnetwork: default\n volumes:\n - name: nfs\n nfs:\n server: ${defaultInstance.networks[0].ipAddresses[0]}\n path: /share1\n readOnly: false\n defaultInstance:\n type: gcp:filestore:Instance\n name: default\n properties:\n name: cloudrun-service\n location: us-central1-b\n tier: BASIC_HDD\n fileShares:\n capacityGb: 1024\n name: share1\n networks:\n - network: default\n modes:\n - MODE_IPV4\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloudrunv2 Service Mesh\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as time from \"@pulumi/time\";\n\nconst mesh = new gcp.networkservices.Mesh(\"mesh\", {name: \"network-services-mesh\"});\nconst waitForMesh = new time.index.Sleep(\"wait_for_mesh\", {createDuration: \"1m\"}, {\n dependsOn: [mesh],\n});\nconst _default = new gcp.cloudrunv2.Service(\"default\", {\n name: \"cloudrun-service\",\n deletionProtection: false,\n location: \"us-central1\",\n launchStage: \"BETA\",\n template: {\n containers: [{\n image: \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n serviceMesh: {\n mesh: mesh.id,\n },\n },\n}, {\n dependsOn: [waitForMesh],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_time as time\n\nmesh = gcp.networkservices.Mesh(\"mesh\", name=\"network-services-mesh\")\nwait_for_mesh = time.index.Sleep(\"wait_for_mesh\", create_duration=1m,\nopts = pulumi.ResourceOptions(depends_on=[mesh]))\ndefault = gcp.cloudrunv2.Service(\"default\",\n name=\"cloudrun-service\",\n deletion_protection=False,\n location=\"us-central1\",\n launch_stage=\"BETA\",\n template={\n \"containers\": [{\n \"image\": \"us-docker.pkg.dev/cloudrun/container/hello\",\n }],\n \"service_mesh\": {\n \"mesh\": mesh.id,\n },\n },\n opts = pulumi.ResourceOptions(depends_on=[wait_for_mesh]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Time = Pulumi.Time;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var mesh = new Gcp.NetworkServices.Mesh(\"mesh\", new()\n {\n Name = \"network-services-mesh\",\n });\n\n var waitForMesh = new Time.Index.Sleep(\"wait_for_mesh\", new()\n {\n CreateDuration = \"1m\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n mesh,\n },\n });\n\n var @default = new Gcp.CloudRunV2.Service(\"default\", new()\n {\n Name = \"cloudrun-service\",\n DeletionProtection = 
false,\n Location = \"us-central1\",\n LaunchStage = \"BETA\",\n Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs\n {\n Containers = new[]\n {\n new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs\n {\n Image = \"us-docker.pkg.dev/cloudrun/container/hello\",\n },\n },\n ServiceMesh = new Gcp.CloudRunV2.Inputs.ServiceTemplateServiceMeshArgs\n {\n Mesh = mesh.Id,\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n waitForMesh,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkservices\"\n\t\"github.com/pulumi/pulumi-time/sdk/go/time\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tmesh, err := networkservices.NewMesh(ctx, \"mesh\", \u0026networkservices.MeshArgs{\n\t\t\tName: pulumi.String(\"network-services-mesh\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\twaitForMesh, err := time.NewSleep(ctx, \"wait_for_mesh\", \u0026time.SleepArgs{\n\t\t\tCreateDuration: \"1m\",\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tmesh,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudrunv2.NewService(ctx, \"default\", \u0026cloudrunv2.ServiceArgs{\n\t\t\tName: pulumi.String(\"cloudrun-service\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tLaunchStage: pulumi.String(\"BETA\"),\n\t\t\tTemplate: \u0026cloudrunv2.ServiceTemplateArgs{\n\t\t\t\tContainers: cloudrunv2.ServiceTemplateContainerArray{\n\t\t\t\t\t\u0026cloudrunv2.ServiceTemplateContainerArgs{\n\t\t\t\t\t\tImage: pulumi.String(\"us-docker.pkg.dev/cloudrun/container/hello\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tServiceMesh: \u0026cloudrunv2.ServiceTemplateServiceMeshArgs{\n\t\t\t\t\tMesh: mesh.ID(),\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\twaitForMesh,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networkservices.Mesh;\nimport com.pulumi.gcp.networkservices.MeshArgs;\nimport com.pulumi.time.sleep;\nimport com.pulumi.time.SleepArgs;\nimport com.pulumi.gcp.cloudrunv2.Service;\nimport com.pulumi.gcp.cloudrunv2.ServiceArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;\nimport com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateServiceMeshArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var mesh = new Mesh(\"mesh\", MeshArgs.builder()\n .name(\"network-services-mesh\")\n .build());\n\n var waitForMesh = new Sleep(\"waitForMesh\", SleepArgs.builder()\n .createDuration(\"1m\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(mesh)\n .build());\n\n var default_ = new Service(\"default\", ServiceArgs.builder()\n .name(\"cloudrun-service\")\n .deletionProtection(false)\n .location(\"us-central1\")\n .launchStage(\"BETA\")\n .template(ServiceTemplateArgs.builder()\n .containers(ServiceTemplateContainerArgs.builder()\n .image(\"us-docker.pkg.dev/cloudrun/container/hello\")\n .build())\n 
.serviceMesh(ServiceTemplateServiceMeshArgs.builder()\n .mesh(mesh.id())\n .build())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(waitForMesh)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudrunv2:Service\n properties:\n name: cloudrun-service\n deletionProtection: false\n location: us-central1\n launchStage: BETA\n template:\n containers:\n - image: us-docker.pkg.dev/cloudrun/container/hello\n serviceMesh:\n mesh: ${mesh.id}\n options:\n dependson:\n - ${waitForMesh}\n waitForMesh:\n type: time:sleep\n name: wait_for_mesh\n properties:\n createDuration: 1m\n options:\n dependson:\n - ${mesh}\n mesh:\n type: gcp:networkservices:Mesh\n properties:\n name: network-services-mesh\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nService can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/services/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, Service can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:cloudrunv2/service:Service default projects/{{project}}/locations/{{location}}/services/{{name}}\n```\n\n```sh\n$ pulumi import gcp:cloudrunv2/service:Service default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:cloudrunv2/service:Service default {{location}}/{{name}}\n```\n\n", "properties": { "annotations": { "type": "object", @@ -142025,12 +143558,16 @@ } }, "gcp:cloudtasks/queue:Queue": { - "description": "A named resource to which messages are sent by publishers.\n\n\n\n## Example Usage\n\n### Queue Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudtasks.Queue(\"default\", {\n name: \"cloud-tasks-queue-test\",\n location: \"us-central1\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudtasks.Queue(\"default\",\n name=\"cloud-tasks-queue-test\",\n location=\"us-central1\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudTasks.Queue(\"default\", new()\n {\n Name = \"cloud-tasks-queue-test\",\n Location = \"us-central1\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudtasks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudtasks.NewQueue(ctx, \"default\", \u0026cloudtasks.QueueArgs{\n\t\t\tName: pulumi.String(\"cloud-tasks-queue-test\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudtasks.Queue;\nimport com.pulumi.gcp.cloudtasks.QueueArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Queue(\"default\", QueueArgs.builder()\n .name(\"cloud-tasks-queue-test\")\n .location(\"us-central1\")\n 
.build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudtasks:Queue\n properties:\n name: cloud-tasks-queue-test\n location: us-central1\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloud Tasks Queue Advanced\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst advancedConfiguration = new gcp.cloudtasks.Queue(\"advanced_configuration\", {\n name: \"instance-name\",\n location: \"us-central1\",\n appEngineRoutingOverride: {\n service: \"worker\",\n version: \"1.0\",\n instance: \"test\",\n },\n rateLimits: {\n maxConcurrentDispatches: 3,\n maxDispatchesPerSecond: 2,\n },\n retryConfig: {\n maxAttempts: 5,\n maxRetryDuration: \"4s\",\n maxBackoff: \"3s\",\n minBackoff: \"2s\",\n maxDoublings: 1,\n },\n stackdriverLoggingConfig: {\n samplingRatio: 0.9,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nadvanced_configuration = gcp.cloudtasks.Queue(\"advanced_configuration\",\n name=\"instance-name\",\n location=\"us-central1\",\n app_engine_routing_override={\n \"service\": \"worker\",\n \"version\": \"1.0\",\n \"instance\": \"test\",\n },\n rate_limits={\n \"max_concurrent_dispatches\": 3,\n \"max_dispatches_per_second\": 2,\n },\n retry_config={\n \"max_attempts\": 5,\n \"max_retry_duration\": \"4s\",\n \"max_backoff\": \"3s\",\n \"min_backoff\": \"2s\",\n \"max_doublings\": 1,\n },\n stackdriver_logging_config={\n \"sampling_ratio\": 0.9,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var advancedConfiguration = new Gcp.CloudTasks.Queue(\"advanced_configuration\", new()\n {\n Name = \"instance-name\",\n Location = \"us-central1\",\n AppEngineRoutingOverride = new Gcp.CloudTasks.Inputs.QueueAppEngineRoutingOverrideArgs\n {\n Service = \"worker\",\n Version = \"1.0\",\n Instance = \"test\",\n },\n RateLimits = new Gcp.CloudTasks.Inputs.QueueRateLimitsArgs\n {\n MaxConcurrentDispatches = 3,\n MaxDispatchesPerSecond = 2,\n },\n RetryConfig = new Gcp.CloudTasks.Inputs.QueueRetryConfigArgs\n {\n MaxAttempts = 5,\n MaxRetryDuration = \"4s\",\n MaxBackoff = \"3s\",\n MinBackoff = \"2s\",\n MaxDoublings = 1,\n },\n StackdriverLoggingConfig = new Gcp.CloudTasks.Inputs.QueueStackdriverLoggingConfigArgs\n {\n SamplingRatio = 0.9,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudtasks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudtasks.NewQueue(ctx, \"advanced_configuration\", \u0026cloudtasks.QueueArgs{\n\t\t\tName: pulumi.String(\"instance-name\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tAppEngineRoutingOverride: \u0026cloudtasks.QueueAppEngineRoutingOverrideArgs{\n\t\t\t\tService: pulumi.String(\"worker\"),\n\t\t\t\tVersion: pulumi.String(\"1.0\"),\n\t\t\t\tInstance: pulumi.String(\"test\"),\n\t\t\t},\n\t\t\tRateLimits: \u0026cloudtasks.QueueRateLimitsArgs{\n\t\t\t\tMaxConcurrentDispatches: pulumi.Int(3),\n\t\t\t\tMaxDispatchesPerSecond: pulumi.Float64(2),\n\t\t\t},\n\t\t\tRetryConfig: \u0026cloudtasks.QueueRetryConfigArgs{\n\t\t\t\tMaxAttempts: pulumi.Int(5),\n\t\t\t\tMaxRetryDuration: pulumi.String(\"4s\"),\n\t\t\t\tMaxBackoff: pulumi.String(\"3s\"),\n\t\t\t\tMinBackoff: pulumi.String(\"2s\"),\n\t\t\t\tMaxDoublings: 
pulumi.Int(1),\n\t\t\t},\n\t\t\tStackdriverLoggingConfig: \u0026cloudtasks.QueueStackdriverLoggingConfigArgs{\n\t\t\t\tSamplingRatio: pulumi.Float64(0.9),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudtasks.Queue;\nimport com.pulumi.gcp.cloudtasks.QueueArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueAppEngineRoutingOverrideArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueRateLimitsArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueRetryConfigArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueStackdriverLoggingConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var advancedConfiguration = new Queue(\"advancedConfiguration\", QueueArgs.builder()\n .name(\"instance-name\")\n .location(\"us-central1\")\n .appEngineRoutingOverride(QueueAppEngineRoutingOverrideArgs.builder()\n .service(\"worker\")\n .version(\"1.0\")\n .instance(\"test\")\n .build())\n .rateLimits(QueueRateLimitsArgs.builder()\n .maxConcurrentDispatches(3)\n .maxDispatchesPerSecond(2)\n .build())\n .retryConfig(QueueRetryConfigArgs.builder()\n .maxAttempts(5)\n .maxRetryDuration(\"4s\")\n .maxBackoff(\"3s\")\n .minBackoff(\"2s\")\n .maxDoublings(1)\n .build())\n .stackdriverLoggingConfig(QueueStackdriverLoggingConfigArgs.builder()\n .samplingRatio(0.9)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n advancedConfiguration:\n type: gcp:cloudtasks:Queue\n name: advanced_configuration\n properties:\n name: instance-name\n location: us-central1\n appEngineRoutingOverride:\n service: worker\n version: '1.0'\n instance: test\n rateLimits:\n maxConcurrentDispatches: 3\n maxDispatchesPerSecond: 2\n retryConfig:\n maxAttempts: 5\n maxRetryDuration: 4s\n maxBackoff: 3s\n minBackoff: 2s\n maxDoublings: 1\n stackdriverLoggingConfig:\n samplingRatio: 0.9\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nQueue can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/queues/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, Queue can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:cloudtasks/queue:Queue default projects/{{project}}/locations/{{location}}/queues/{{name}}\n```\n\n```sh\n$ pulumi import gcp:cloudtasks/queue:Queue default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:cloudtasks/queue:Queue default {{location}}/{{name}}\n```\n\n", + "description": "A named resource to which messages are sent by publishers.\n\n\n\n## Example Usage\n\n### Queue Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.cloudtasks.Queue(\"default\", {\n name: \"cloud-tasks-queue-test\",\n location: \"us-central1\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.cloudtasks.Queue(\"default\",\n name=\"cloud-tasks-queue-test\",\n location=\"us-central1\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.CloudTasks.Queue(\"default\", new()\n {\n Name = \"cloud-tasks-queue-test\",\n Location = \"us-central1\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudtasks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudtasks.NewQueue(ctx, \"default\", \u0026cloudtasks.QueueArgs{\n\t\t\tName: pulumi.String(\"cloud-tasks-queue-test\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudtasks.Queue;\nimport com.pulumi.gcp.cloudtasks.QueueArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Queue(\"default\", QueueArgs.builder()\n .name(\"cloud-tasks-queue-test\")\n .location(\"us-central1\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:cloudtasks:Queue\n properties:\n name: cloud-tasks-queue-test\n location: us-central1\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloud Tasks Queue Advanced\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst advancedConfiguration = new gcp.cloudtasks.Queue(\"advanced_configuration\", {\n name: \"instance-name\",\n location: \"us-central1\",\n appEngineRoutingOverride: {\n service: \"worker\",\n version: \"1.0\",\n instance: \"test\",\n },\n rateLimits: {\n maxConcurrentDispatches: 3,\n maxDispatchesPerSecond: 2,\n },\n retryConfig: {\n maxAttempts: 5,\n maxRetryDuration: \"4s\",\n maxBackoff: \"3s\",\n minBackoff: \"2s\",\n maxDoublings: 1,\n },\n stackdriverLoggingConfig: {\n samplingRatio: 0.9,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nadvanced_configuration = gcp.cloudtasks.Queue(\"advanced_configuration\",\n name=\"instance-name\",\n location=\"us-central1\",\n app_engine_routing_override={\n \"service\": \"worker\",\n \"version\": \"1.0\",\n \"instance\": \"test\",\n },\n rate_limits={\n 
\"max_concurrent_dispatches\": 3,\n \"max_dispatches_per_second\": 2,\n },\n retry_config={\n \"max_attempts\": 5,\n \"max_retry_duration\": \"4s\",\n \"max_backoff\": \"3s\",\n \"min_backoff\": \"2s\",\n \"max_doublings\": 1,\n },\n stackdriver_logging_config={\n \"sampling_ratio\": 0.9,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var advancedConfiguration = new Gcp.CloudTasks.Queue(\"advanced_configuration\", new()\n {\n Name = \"instance-name\",\n Location = \"us-central1\",\n AppEngineRoutingOverride = new Gcp.CloudTasks.Inputs.QueueAppEngineRoutingOverrideArgs\n {\n Service = \"worker\",\n Version = \"1.0\",\n Instance = \"test\",\n },\n RateLimits = new Gcp.CloudTasks.Inputs.QueueRateLimitsArgs\n {\n MaxConcurrentDispatches = 3,\n MaxDispatchesPerSecond = 2,\n },\n RetryConfig = new Gcp.CloudTasks.Inputs.QueueRetryConfigArgs\n {\n MaxAttempts = 5,\n MaxRetryDuration = \"4s\",\n MaxBackoff = \"3s\",\n MinBackoff = \"2s\",\n MaxDoublings = 1,\n },\n StackdriverLoggingConfig = new Gcp.CloudTasks.Inputs.QueueStackdriverLoggingConfigArgs\n {\n SamplingRatio = 0.9,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudtasks\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudtasks.NewQueue(ctx, \"advanced_configuration\", \u0026cloudtasks.QueueArgs{\n\t\t\tName: pulumi.String(\"instance-name\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tAppEngineRoutingOverride: \u0026cloudtasks.QueueAppEngineRoutingOverrideArgs{\n\t\t\t\tService: pulumi.String(\"worker\"),\n\t\t\t\tVersion: pulumi.String(\"1.0\"),\n\t\t\t\tInstance: pulumi.String(\"test\"),\n\t\t\t},\n\t\t\tRateLimits: \u0026cloudtasks.QueueRateLimitsArgs{\n\t\t\t\tMaxConcurrentDispatches: pulumi.Int(3),\n\t\t\t\tMaxDispatchesPerSecond: pulumi.Float64(2),\n\t\t\t},\n\t\t\tRetryConfig: \u0026cloudtasks.QueueRetryConfigArgs{\n\t\t\t\tMaxAttempts: pulumi.Int(5),\n\t\t\t\tMaxRetryDuration: pulumi.String(\"4s\"),\n\t\t\t\tMaxBackoff: pulumi.String(\"3s\"),\n\t\t\t\tMinBackoff: pulumi.String(\"2s\"),\n\t\t\t\tMaxDoublings: pulumi.Int(1),\n\t\t\t},\n\t\t\tStackdriverLoggingConfig: \u0026cloudtasks.QueueStackdriverLoggingConfigArgs{\n\t\t\t\tSamplingRatio: pulumi.Float64(0.9),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudtasks.Queue;\nimport com.pulumi.gcp.cloudtasks.QueueArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueAppEngineRoutingOverrideArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueRateLimitsArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueRetryConfigArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueStackdriverLoggingConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var advancedConfiguration = new Queue(\"advancedConfiguration\", QueueArgs.builder()\n .name(\"instance-name\")\n .location(\"us-central1\")\n .appEngineRoutingOverride(QueueAppEngineRoutingOverrideArgs.builder()\n 
.service(\"worker\")\n .version(\"1.0\")\n .instance(\"test\")\n .build())\n .rateLimits(QueueRateLimitsArgs.builder()\n .maxConcurrentDispatches(3)\n .maxDispatchesPerSecond(2)\n .build())\n .retryConfig(QueueRetryConfigArgs.builder()\n .maxAttempts(5)\n .maxRetryDuration(\"4s\")\n .maxBackoff(\"3s\")\n .minBackoff(\"2s\")\n .maxDoublings(1)\n .build())\n .stackdriverLoggingConfig(QueueStackdriverLoggingConfigArgs.builder()\n .samplingRatio(0.9)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n advancedConfiguration:\n type: gcp:cloudtasks:Queue\n name: advanced_configuration\n properties:\n name: instance-name\n location: us-central1\n appEngineRoutingOverride:\n service: worker\n version: '1.0'\n instance: test\n rateLimits:\n maxConcurrentDispatches: 3\n maxDispatchesPerSecond: 2\n retryConfig:\n maxAttempts: 5\n maxRetryDuration: 4s\n maxBackoff: 3s\n minBackoff: 2s\n maxDoublings: 1\n stackdriverLoggingConfig:\n samplingRatio: 0.9\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloud Tasks Queue Http Target Oidc\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst oidcServiceAccount = new gcp.serviceaccount.Account(\"oidc_service_account\", {\n accountId: \"example-oidc\",\n displayName: \"Tasks Queue OIDC Service Account\",\n});\nconst httpTargetOidc = new gcp.cloudtasks.Queue(\"http_target_oidc\", {\n name: \"cloud-tasks-queue-http-target-oidc\",\n location: \"us-central1\",\n httpTarget: {\n httpMethod: \"POST\",\n uriOverride: {\n scheme: \"HTTPS\",\n host: \"oidc.example.com\",\n port: \"8443\",\n pathOverride: {\n path: \"/users/1234\",\n },\n queryOverride: {\n queryParams: \"qparam1=123\u0026qparam2=456\",\n },\n uriOverrideEnforceMode: \"IF_NOT_EXISTS\",\n },\n headerOverrides: [\n {\n header: {\n key: \"AddSomethingElse\",\n value: \"MyOtherValue\",\n },\n },\n {\n header: {\n key: \"AddMe\",\n value: \"MyValue\",\n },\n },\n ],\n oidcToken: {\n serviceAccountEmail: oidcServiceAccount.email,\n audience: \"https://oidc.example.com\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\noidc_service_account = gcp.serviceaccount.Account(\"oidc_service_account\",\n account_id=\"example-oidc\",\n display_name=\"Tasks Queue OIDC Service Account\")\nhttp_target_oidc = gcp.cloudtasks.Queue(\"http_target_oidc\",\n name=\"cloud-tasks-queue-http-target-oidc\",\n location=\"us-central1\",\n http_target={\n \"http_method\": \"POST\",\n \"uri_override\": {\n \"scheme\": \"HTTPS\",\n \"host\": \"oidc.example.com\",\n \"port\": \"8443\",\n \"path_override\": {\n \"path\": \"/users/1234\",\n },\n \"query_override\": {\n \"query_params\": \"qparam1=123\u0026qparam2=456\",\n },\n \"uri_override_enforce_mode\": \"IF_NOT_EXISTS\",\n },\n \"header_overrides\": [\n {\n \"header\": {\n \"key\": \"AddSomethingElse\",\n \"value\": \"MyOtherValue\",\n },\n },\n {\n \"header\": {\n \"key\": \"AddMe\",\n \"value\": \"MyValue\",\n },\n },\n ],\n \"oidc_token\": {\n \"service_account_email\": oidc_service_account.email,\n \"audience\": \"https://oidc.example.com\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var oidcServiceAccount = new Gcp.ServiceAccount.Account(\"oidc_service_account\", new()\n {\n AccountId = \"example-oidc\",\n DisplayName = \"Tasks Queue OIDC Service Account\",\n });\n\n var httpTargetOidc = new 
Gcp.CloudTasks.Queue(\"http_target_oidc\", new()\n {\n Name = \"cloud-tasks-queue-http-target-oidc\",\n Location = \"us-central1\",\n HttpTarget = new Gcp.CloudTasks.Inputs.QueueHttpTargetArgs\n {\n HttpMethod = \"POST\",\n UriOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideArgs\n {\n Scheme = \"HTTPS\",\n Host = \"oidc.example.com\",\n Port = \"8443\",\n PathOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverridePathOverrideArgs\n {\n Path = \"/users/1234\",\n },\n QueryOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideQueryOverrideArgs\n {\n QueryParams = \"qparam1=123\u0026qparam2=456\",\n },\n UriOverrideEnforceMode = \"IF_NOT_EXISTS\",\n },\n HeaderOverrides = new[]\n {\n new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideArgs\n {\n Header = new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideHeaderArgs\n {\n Key = \"AddSomethingElse\",\n Value = \"MyOtherValue\",\n },\n },\n new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideArgs\n {\n Header = new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideHeaderArgs\n {\n Key = \"AddMe\",\n Value = \"MyValue\",\n },\n },\n },\n OidcToken = new Gcp.CloudTasks.Inputs.QueueHttpTargetOidcTokenArgs\n {\n ServiceAccountEmail = oidcServiceAccount.Email,\n Audience = \"https://oidc.example.com\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudtasks\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\toidcServiceAccount, err := serviceaccount.NewAccount(ctx, \"oidc_service_account\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"example-oidc\"),\n\t\t\tDisplayName: pulumi.String(\"Tasks Queue OIDC Service Account\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudtasks.NewQueue(ctx, \"http_target_oidc\", \u0026cloudtasks.QueueArgs{\n\t\t\tName: pulumi.String(\"cloud-tasks-queue-http-target-oidc\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tHttpTarget: \u0026cloudtasks.QueueHttpTargetArgs{\n\t\t\t\tHttpMethod: pulumi.String(\"POST\"),\n\t\t\t\tUriOverride: \u0026cloudtasks.QueueHttpTargetUriOverrideArgs{\n\t\t\t\t\tScheme: pulumi.String(\"HTTPS\"),\n\t\t\t\t\tHost: pulumi.String(\"oidc.example.com\"),\n\t\t\t\t\tPort: pulumi.String(\"8443\"),\n\t\t\t\t\tPathOverride: \u0026cloudtasks.QueueHttpTargetUriOverridePathOverrideArgs{\n\t\t\t\t\t\tPath: pulumi.String(\"/users/1234\"),\n\t\t\t\t\t},\n\t\t\t\t\tQueryOverride: \u0026cloudtasks.QueueHttpTargetUriOverrideQueryOverrideArgs{\n\t\t\t\t\t\tQueryParams: pulumi.String(\"qparam1=123\u0026qparam2=456\"),\n\t\t\t\t\t},\n\t\t\t\t\tUriOverrideEnforceMode: pulumi.String(\"IF_NOT_EXISTS\"),\n\t\t\t\t},\n\t\t\t\tHeaderOverrides: cloudtasks.QueueHttpTargetHeaderOverrideArray{\n\t\t\t\t\t\u0026cloudtasks.QueueHttpTargetHeaderOverrideArgs{\n\t\t\t\t\t\tHeader: \u0026cloudtasks.QueueHttpTargetHeaderOverrideHeaderArgs{\n\t\t\t\t\t\t\tKey: pulumi.String(\"AddSomethingElse\"),\n\t\t\t\t\t\t\tValue: pulumi.String(\"MyOtherValue\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\u0026cloudtasks.QueueHttpTargetHeaderOverrideArgs{\n\t\t\t\t\t\tHeader: \u0026cloudtasks.QueueHttpTargetHeaderOverrideHeaderArgs{\n\t\t\t\t\t\t\tKey: pulumi.String(\"AddMe\"),\n\t\t\t\t\t\t\tValue: pulumi.String(\"MyValue\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOidcToken: 
\u0026cloudtasks.QueueHttpTargetOidcTokenArgs{\n\t\t\t\t\tServiceAccountEmail: oidcServiceAccount.Email,\n\t\t\t\t\tAudience: pulumi.String(\"https://oidc.example.com\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.cloudtasks.Queue;\nimport com.pulumi.gcp.cloudtasks.QueueArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverridePathOverrideArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideQueryOverrideArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetOidcTokenArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var oidcServiceAccount = new Account(\"oidcServiceAccount\", AccountArgs.builder()\n .accountId(\"example-oidc\")\n .displayName(\"Tasks Queue OIDC Service Account\")\n .build());\n\n var httpTargetOidc = new Queue(\"httpTargetOidc\", QueueArgs.builder()\n .name(\"cloud-tasks-queue-http-target-oidc\")\n .location(\"us-central1\")\n .httpTarget(QueueHttpTargetArgs.builder()\n .httpMethod(\"POST\")\n .uriOverride(QueueHttpTargetUriOverrideArgs.builder()\n .scheme(\"HTTPS\")\n .host(\"oidc.example.com\")\n .port(8443)\n .pathOverride(QueueHttpTargetUriOverridePathOverrideArgs.builder()\n .path(\"/users/1234\")\n .build())\n .queryOverride(QueueHttpTargetUriOverrideQueryOverrideArgs.builder()\n .queryParams(\"qparam1=123\u0026qparam2=456\")\n .build())\n .uriOverrideEnforceMode(\"IF_NOT_EXISTS\")\n .build())\n .headerOverrides( \n QueueHttpTargetHeaderOverrideArgs.builder()\n .header(QueueHttpTargetHeaderOverrideHeaderArgs.builder()\n .key(\"AddSomethingElse\")\n .value(\"MyOtherValue\")\n .build())\n .build(),\n QueueHttpTargetHeaderOverrideArgs.builder()\n .header(QueueHttpTargetHeaderOverrideHeaderArgs.builder()\n .key(\"AddMe\")\n .value(\"MyValue\")\n .build())\n .build())\n .oidcToken(QueueHttpTargetOidcTokenArgs.builder()\n .serviceAccountEmail(oidcServiceAccount.email())\n .audience(\"https://oidc.example.com\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n httpTargetOidc:\n type: gcp:cloudtasks:Queue\n name: http_target_oidc\n properties:\n name: cloud-tasks-queue-http-target-oidc\n location: us-central1\n httpTarget:\n httpMethod: POST\n uriOverride:\n scheme: HTTPS\n host: oidc.example.com\n port: 8443\n pathOverride:\n path: /users/1234\n queryOverride:\n queryParams: qparam1=123\u0026qparam2=456\n uriOverrideEnforceMode: IF_NOT_EXISTS\n headerOverrides:\n - header:\n key: AddSomethingElse\n value: MyOtherValue\n - header:\n key: AddMe\n value: MyValue\n oidcToken:\n serviceAccountEmail: ${oidcServiceAccount.email}\n audience: https://oidc.example.com\n oidcServiceAccount:\n type: gcp:serviceaccount:Account\n name: oidc_service_account\n properties:\n accountId: example-oidc\n displayName: Tasks Queue OIDC Service Account\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Cloud Tasks Queue Http Target 
Oauth\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst oauthServiceAccount = new gcp.serviceaccount.Account(\"oauth_service_account\", {\n accountId: \"example-oauth\",\n displayName: \"Tasks Queue OAuth Service Account\",\n});\nconst httpTargetOauth = new gcp.cloudtasks.Queue(\"http_target_oauth\", {\n name: \"cloud-tasks-queue-http-target-oauth\",\n location: \"us-central1\",\n httpTarget: {\n httpMethod: \"POST\",\n uriOverride: {\n scheme: \"HTTPS\",\n host: \"oauth.example.com\",\n port: \"8443\",\n pathOverride: {\n path: \"/users/1234\",\n },\n queryOverride: {\n queryParams: \"qparam1=123\u0026qparam2=456\",\n },\n uriOverrideEnforceMode: \"IF_NOT_EXISTS\",\n },\n headerOverrides: [\n {\n header: {\n key: \"AddSomethingElse\",\n value: \"MyOtherValue\",\n },\n },\n {\n header: {\n key: \"AddMe\",\n value: \"MyValue\",\n },\n },\n ],\n oauthToken: {\n serviceAccountEmail: oauthServiceAccount.email,\n scope: \"openid https://www.googleapis.com/auth/userinfo.email\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\noauth_service_account = gcp.serviceaccount.Account(\"oauth_service_account\",\n account_id=\"example-oauth\",\n display_name=\"Tasks Queue OAuth Service Account\")\nhttp_target_oauth = gcp.cloudtasks.Queue(\"http_target_oauth\",\n name=\"cloud-tasks-queue-http-target-oauth\",\n location=\"us-central1\",\n http_target={\n \"http_method\": \"POST\",\n \"uri_override\": {\n \"scheme\": \"HTTPS\",\n \"host\": \"oauth.example.com\",\n \"port\": \"8443\",\n \"path_override\": {\n \"path\": \"/users/1234\",\n },\n \"query_override\": {\n \"query_params\": \"qparam1=123\u0026qparam2=456\",\n },\n \"uri_override_enforce_mode\": \"IF_NOT_EXISTS\",\n },\n \"header_overrides\": [\n {\n \"header\": {\n \"key\": \"AddSomethingElse\",\n \"value\": \"MyOtherValue\",\n },\n },\n {\n \"header\": {\n \"key\": \"AddMe\",\n \"value\": \"MyValue\",\n },\n },\n ],\n \"oauth_token\": {\n \"service_account_email\": oauth_service_account.email,\n \"scope\": \"openid https://www.googleapis.com/auth/userinfo.email\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var oauthServiceAccount = new Gcp.ServiceAccount.Account(\"oauth_service_account\", new()\n {\n AccountId = \"example-oauth\",\n DisplayName = \"Tasks Queue OAuth Service Account\",\n });\n\n var httpTargetOauth = new Gcp.CloudTasks.Queue(\"http_target_oauth\", new()\n {\n Name = \"cloud-tasks-queue-http-target-oauth\",\n Location = \"us-central1\",\n HttpTarget = new Gcp.CloudTasks.Inputs.QueueHttpTargetArgs\n {\n HttpMethod = \"POST\",\n UriOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideArgs\n {\n Scheme = \"HTTPS\",\n Host = \"oauth.example.com\",\n Port = \"8443\",\n PathOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverridePathOverrideArgs\n {\n Path = \"/users/1234\",\n },\n QueryOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideQueryOverrideArgs\n {\n QueryParams = \"qparam1=123\u0026qparam2=456\",\n },\n UriOverrideEnforceMode = \"IF_NOT_EXISTS\",\n },\n HeaderOverrides = new[]\n {\n new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideArgs\n {\n Header = new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideHeaderArgs\n {\n Key = \"AddSomethingElse\",\n Value = \"MyOtherValue\",\n },\n },\n new 
Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideArgs\n {\n Header = new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideHeaderArgs\n {\n Key = \"AddMe\",\n Value = \"MyValue\",\n },\n },\n },\n OauthToken = new Gcp.CloudTasks.Inputs.QueueHttpTargetOauthTokenArgs\n {\n ServiceAccountEmail = oauthServiceAccount.Email,\n Scope = \"openid https://www.googleapis.com/auth/userinfo.email\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudtasks\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\toauthServiceAccount, err := serviceaccount.NewAccount(ctx, \"oauth_service_account\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"example-oauth\"),\n\t\t\tDisplayName: pulumi.String(\"Tasks Queue OAuth Service Account\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudtasks.NewQueue(ctx, \"http_target_oauth\", \u0026cloudtasks.QueueArgs{\n\t\t\tName: pulumi.String(\"cloud-tasks-queue-http-target-oauth\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tHttpTarget: \u0026cloudtasks.QueueHttpTargetArgs{\n\t\t\t\tHttpMethod: pulumi.String(\"POST\"),\n\t\t\t\tUriOverride: \u0026cloudtasks.QueueHttpTargetUriOverrideArgs{\n\t\t\t\t\tScheme: pulumi.String(\"HTTPS\"),\n\t\t\t\t\tHost: pulumi.String(\"oauth.example.com\"),\n\t\t\t\t\tPort: pulumi.String(\"8443\"),\n\t\t\t\t\tPathOverride: \u0026cloudtasks.QueueHttpTargetUriOverridePathOverrideArgs{\n\t\t\t\t\t\tPath: pulumi.String(\"/users/1234\"),\n\t\t\t\t\t},\n\t\t\t\t\tQueryOverride: \u0026cloudtasks.QueueHttpTargetUriOverrideQueryOverrideArgs{\n\t\t\t\t\t\tQueryParams: pulumi.String(\"qparam1=123\u0026qparam2=456\"),\n\t\t\t\t\t},\n\t\t\t\t\tUriOverrideEnforceMode: pulumi.String(\"IF_NOT_EXISTS\"),\n\t\t\t\t},\n\t\t\t\tHeaderOverrides: cloudtasks.QueueHttpTargetHeaderOverrideArray{\n\t\t\t\t\t\u0026cloudtasks.QueueHttpTargetHeaderOverrideArgs{\n\t\t\t\t\t\tHeader: \u0026cloudtasks.QueueHttpTargetHeaderOverrideHeaderArgs{\n\t\t\t\t\t\t\tKey: pulumi.String(\"AddSomethingElse\"),\n\t\t\t\t\t\t\tValue: pulumi.String(\"MyOtherValue\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\u0026cloudtasks.QueueHttpTargetHeaderOverrideArgs{\n\t\t\t\t\t\tHeader: \u0026cloudtasks.QueueHttpTargetHeaderOverrideHeaderArgs{\n\t\t\t\t\t\t\tKey: pulumi.String(\"AddMe\"),\n\t\t\t\t\t\t\tValue: pulumi.String(\"MyValue\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOauthToken: \u0026cloudtasks.QueueHttpTargetOauthTokenArgs{\n\t\t\t\t\tServiceAccountEmail: oauthServiceAccount.Email,\n\t\t\t\t\tScope: pulumi.String(\"openid https://www.googleapis.com/auth/userinfo.email\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.cloudtasks.Queue;\nimport com.pulumi.gcp.cloudtasks.QueueArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverridePathOverrideArgs;\nimport com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideQueryOverrideArgs;\nimport 
com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetOauthTokenArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var oauthServiceAccount = new Account(\"oauthServiceAccount\", AccountArgs.builder()\n .accountId(\"example-oauth\")\n .displayName(\"Tasks Queue OAuth Service Account\")\n .build());\n\n var httpTargetOauth = new Queue(\"httpTargetOauth\", QueueArgs.builder()\n .name(\"cloud-tasks-queue-http-target-oauth\")\n .location(\"us-central1\")\n .httpTarget(QueueHttpTargetArgs.builder()\n .httpMethod(\"POST\")\n .uriOverride(QueueHttpTargetUriOverrideArgs.builder()\n .scheme(\"HTTPS\")\n .host(\"oauth.example.com\")\n .port(8443)\n .pathOverride(QueueHttpTargetUriOverridePathOverrideArgs.builder()\n .path(\"/users/1234\")\n .build())\n .queryOverride(QueueHttpTargetUriOverrideQueryOverrideArgs.builder()\n .queryParams(\"qparam1=123\u0026qparam2=456\")\n .build())\n .uriOverrideEnforceMode(\"IF_NOT_EXISTS\")\n .build())\n .headerOverrides( \n QueueHttpTargetHeaderOverrideArgs.builder()\n .header(QueueHttpTargetHeaderOverrideHeaderArgs.builder()\n .key(\"AddSomethingElse\")\n .value(\"MyOtherValue\")\n .build())\n .build(),\n QueueHttpTargetHeaderOverrideArgs.builder()\n .header(QueueHttpTargetHeaderOverrideHeaderArgs.builder()\n .key(\"AddMe\")\n .value(\"MyValue\")\n .build())\n .build())\n .oauthToken(QueueHttpTargetOauthTokenArgs.builder()\n .serviceAccountEmail(oauthServiceAccount.email())\n .scope(\"openid https://www.googleapis.com/auth/userinfo.email\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n httpTargetOauth:\n type: gcp:cloudtasks:Queue\n name: http_target_oauth\n properties:\n name: cloud-tasks-queue-http-target-oauth\n location: us-central1\n httpTarget:\n httpMethod: POST\n uriOverride:\n scheme: HTTPS\n host: oauth.example.com\n port: 8443\n pathOverride:\n path: /users/1234\n queryOverride:\n queryParams: qparam1=123\u0026qparam2=456\n uriOverrideEnforceMode: IF_NOT_EXISTS\n headerOverrides:\n - header:\n key: AddSomethingElse\n value: MyOtherValue\n - header:\n key: AddMe\n value: MyValue\n oauthToken:\n serviceAccountEmail: ${oauthServiceAccount.email}\n scope: openid https://www.googleapis.com/auth/userinfo.email\n oauthServiceAccount:\n type: gcp:serviceaccount:Account\n name: oauth_service_account\n properties:\n accountId: example-oauth\n displayName: Tasks Queue OAuth Service Account\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nQueue can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/queues/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, Queue can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:cloudtasks/queue:Queue default projects/{{project}}/locations/{{location}}/queues/{{name}}\n```\n\n```sh\n$ pulumi import gcp:cloudtasks/queue:Queue default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:cloudtasks/queue:Queue default {{location}}/{{name}}\n```\n\n", "properties": { "appEngineRoutingOverride": { "$ref": "#/types/gcp:cloudtasks/QueueAppEngineRoutingOverride:QueueAppEngineRoutingOverride", "description": "Overrides for task-level appEngineRouting. 
These settings apply only\nto App Engine tasks in this queue\nStructure is documented below.\n" }, + "httpTarget": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTarget:QueueHttpTarget", + "description": "Modifies HTTP target for HTTP tasks.\nStructure is documented below.\n" + }, "location": { "type": "string", "description": "The location of the queue\n\n\n- - -\n" @@ -142068,6 +143605,10 @@ "$ref": "#/types/gcp:cloudtasks/QueueAppEngineRoutingOverride:QueueAppEngineRoutingOverride", "description": "Overrides for task-level appEngineRouting. These settings apply only\nto App Engine tasks in this queue\nStructure is documented below.\n" }, + "httpTarget": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTarget:QueueHttpTarget", + "description": "Modifies HTTP target for HTTP tasks.\nStructure is documented below.\n" + }, "location": { "type": "string", "description": "The location of the queue\n\n\n- - -\n", @@ -142106,6 +143647,10 @@ "$ref": "#/types/gcp:cloudtasks/QueueAppEngineRoutingOverride:QueueAppEngineRoutingOverride", "description": "Overrides for task-level appEngineRouting. These settings apply only\nto App Engine tasks in this queue\nStructure is documented below.\n" }, + "httpTarget": { + "$ref": "#/types/gcp:cloudtasks/QueueHttpTarget:QueueHttpTarget", + "description": "Modifies HTTP target for HTTP tasks.\nStructure is documented below.\n" + }, "location": { "type": "string", "description": "The location of the queue\n\n\n- - -\n", @@ -148059,7 +149604,7 @@ } }, "gcp:compute/healthCheck:HealthCheck": { - "description": "Health Checks determine whether instances are responsive and able to do work.\nThey are an important part of a comprehensive load balancing configuration,\nas they enable monitoring instances behind load balancers.\n\nHealth Checks poll instances at a specified interval. Instances that\ndo not respond successfully to some number of probes in a row are marked\nas unhealthy. No new connections are sent to unhealthy instances,\nthough existing connections will continue. The health check will\ncontinue to poll unhealthy instances. If an instance later responds\nsuccessfully to some number of consecutive probes, it is marked\nhealthy again and can receive new connections.\n\n~\u003e**NOTE**: Legacy HTTP(S) health checks must be used for target pool-based network\nload balancers. 
See the [official guide](https://cloud.google.com/load-balancing/docs/health-check-concepts#selecting_hc)\nfor choosing a type of health check.\n\n\nTo get more information about HealthCheck, see:\n\n* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/load-balancing/docs/health-checks)\n\n## Example Usage\n\n### Health Check Tcp\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst tcp_health_check = new gcp.compute.HealthCheck(\"tcp-health-check\", {\n name: \"tcp-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n tcpHealthCheck: {\n port: 80,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntcp_health_check = gcp.compute.HealthCheck(\"tcp-health-check\",\n name=\"tcp-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n tcp_health_check={\n \"port\": 80,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var tcp_health_check = new Gcp.Compute.HealthCheck(\"tcp-health-check\", new()\n {\n Name = \"tcp-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n TcpHealthCheck = new Gcp.Compute.Inputs.HealthCheckTcpHealthCheckArgs\n {\n Port = 80,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"tcp-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"tcp-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tTcpHealthCheck: \u0026compute.HealthCheckTcpHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(80),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckTcpHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var tcp_health_check = new HealthCheck(\"tcp-health-check\", HealthCheckArgs.builder()\n .name(\"tcp-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .tcpHealthCheck(HealthCheckTcpHealthCheckArgs.builder()\n .port(\"80\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n tcp-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: tcp-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n tcpHealthCheck:\n port: '80'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Tcp Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst tcp_health_check = new gcp.compute.HealthCheck(\"tcp-health-check\", {\n name: \"tcp-health-check\",\n description: \"Health check via tcp\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n 
unhealthyThreshold: 5,\n tcpHealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n request: \"ARE YOU HEALTHY?\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntcp_health_check = gcp.compute.HealthCheck(\"tcp-health-check\",\n name=\"tcp-health-check\",\n description=\"Health check via tcp\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n tcp_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"request\": \"ARE YOU HEALTHY?\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var tcp_health_check = new Gcp.Compute.HealthCheck(\"tcp-health-check\", new()\n {\n Name = \"tcp-health-check\",\n Description = \"Health check via tcp\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n TcpHealthCheck = new Gcp.Compute.Inputs.HealthCheckTcpHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Request = \"ARE YOU HEALTHY?\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"tcp-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"tcp-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via tcp\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tTcpHealthCheck: \u0026compute.HealthCheckTcpHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tRequest: pulumi.String(\"ARE YOU HEALTHY?\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckTcpHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var tcp_health_check = new HealthCheck(\"tcp-health-check\", HealthCheckArgs.builder()\n .name(\"tcp-health-check\")\n .description(\"Health check via tcp\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n .tcpHealthCheck(HealthCheckTcpHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .request(\"ARE YOU HEALTHY?\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n tcp-health-check:\n type: 
gcp:compute:HealthCheck\n properties:\n name: tcp-health-check\n description: Health check via tcp\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n tcpHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n request: ARE YOU HEALTHY?\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Ssl\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst ssl_health_check = new gcp.compute.HealthCheck(\"ssl-health-check\", {\n name: \"ssl-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n sslHealthCheck: {\n port: 443,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nssl_health_check = gcp.compute.HealthCheck(\"ssl-health-check\",\n name=\"ssl-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n ssl_health_check={\n \"port\": 443,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var ssl_health_check = new Gcp.Compute.HealthCheck(\"ssl-health-check\", new()\n {\n Name = \"ssl-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n SslHealthCheck = new Gcp.Compute.Inputs.HealthCheckSslHealthCheckArgs\n {\n Port = 443,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"ssl-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"ssl-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tSslHealthCheck: \u0026compute.HealthCheckSslHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(443),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckSslHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var ssl_health_check = new HealthCheck(\"ssl-health-check\", HealthCheckArgs.builder()\n .name(\"ssl-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .sslHealthCheck(HealthCheckSslHealthCheckArgs.builder()\n .port(\"443\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n ssl-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: ssl-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n sslHealthCheck:\n port: '443'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Ssl Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst ssl_health_check = new gcp.compute.HealthCheck(\"ssl-health-check\", {\n name: \"ssl-health-check\",\n description: \"Health check via ssl\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n unhealthyThreshold: 5,\n sslHealthCheck: 
{\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n request: \"ARE YOU HEALTHY?\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nssl_health_check = gcp.compute.HealthCheck(\"ssl-health-check\",\n name=\"ssl-health-check\",\n description=\"Health check via ssl\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n ssl_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"request\": \"ARE YOU HEALTHY?\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var ssl_health_check = new Gcp.Compute.HealthCheck(\"ssl-health-check\", new()\n {\n Name = \"ssl-health-check\",\n Description = \"Health check via ssl\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n SslHealthCheck = new Gcp.Compute.Inputs.HealthCheckSslHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Request = \"ARE YOU HEALTHY?\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"ssl-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"ssl-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via ssl\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tSslHealthCheck: \u0026compute.HealthCheckSslHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tRequest: pulumi.String(\"ARE YOU HEALTHY?\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckSslHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var ssl_health_check = new HealthCheck(\"ssl-health-check\", HealthCheckArgs.builder()\n .name(\"ssl-health-check\")\n .description(\"Health check via ssl\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n .sslHealthCheck(HealthCheckSslHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .request(\"ARE YOU HEALTHY?\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n ssl-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: 
ssl-health-check\n description: Health check via ssl\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n sslHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n request: ARE YOU HEALTHY?\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Http\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst http_health_check = new gcp.compute.HealthCheck(\"http-health-check\", {\n name: \"http-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n httpHealthCheck: {\n port: 80,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttp_health_check = gcp.compute.HealthCheck(\"http-health-check\",\n name=\"http-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n http_health_check={\n \"port\": 80,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var http_health_check = new Gcp.Compute.HealthCheck(\"http-health-check\", new()\n {\n Name = \"http-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HttpHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpHealthCheckArgs\n {\n Port = 80,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"http-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"http-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHttpHealthCheck: \u0026compute.HealthCheckHttpHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(80),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttpHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var http_health_check = new HealthCheck(\"http-health-check\", HealthCheckArgs.builder()\n .name(\"http-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .httpHealthCheck(HealthCheckHttpHealthCheckArgs.builder()\n .port(80)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n http-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: http-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n httpHealthCheck:\n port: 80\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Http Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst http_health_check = new gcp.compute.HealthCheck(\"http-health-check\", {\n name: \"http-health-check\",\n description: \"Health check via http\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n unhealthyThreshold: 5,\n httpHealthCheck: {\n portName: 
\"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n host: \"1.2.3.4\",\n requestPath: \"/mypath\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttp_health_check = gcp.compute.HealthCheck(\"http-health-check\",\n name=\"http-health-check\",\n description=\"Health check via http\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n http_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"host\": \"1.2.3.4\",\n \"request_path\": \"/mypath\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var http_health_check = new Gcp.Compute.HealthCheck(\"http-health-check\", new()\n {\n Name = \"http-health-check\",\n Description = \"Health check via http\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n HttpHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Host = \"1.2.3.4\",\n RequestPath = \"/mypath\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"http-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"http-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via http\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tHttpHealthCheck: \u0026compute.HealthCheckHttpHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tHost: pulumi.String(\"1.2.3.4\"),\n\t\t\t\tRequestPath: pulumi.String(\"/mypath\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttpHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var http_health_check = new HealthCheck(\"http-health-check\", HealthCheckArgs.builder()\n .name(\"http-health-check\")\n .description(\"Health check via http\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n .httpHealthCheck(HealthCheckHttpHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .host(\"1.2.3.4\")\n .requestPath(\"/mypath\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n 
.build());\n\n }\n}\n```\n```yaml\nresources:\n http-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: http-health-check\n description: Health check via http\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n httpHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n host: 1.2.3.4\n requestPath: /mypath\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Https\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst https_health_check = new gcp.compute.HealthCheck(\"https-health-check\", {\n name: \"https-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n httpsHealthCheck: {\n port: 443,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttps_health_check = gcp.compute.HealthCheck(\"https-health-check\",\n name=\"https-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n https_health_check={\n \"port\": 443,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var https_health_check = new Gcp.Compute.HealthCheck(\"https-health-check\", new()\n {\n Name = \"https-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HttpsHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpsHealthCheckArgs\n {\n Port = 443,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"https-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"https-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHttpsHealthCheck: \u0026compute.HealthCheckHttpsHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(443),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttpsHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var https_health_check = new HealthCheck(\"https-health-check\", HealthCheckArgs.builder()\n .name(\"https-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .httpsHealthCheck(HealthCheckHttpsHealthCheckArgs.builder()\n .port(\"443\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n https-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: https-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n httpsHealthCheck:\n port: '443'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Https Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst https_health_check = new gcp.compute.HealthCheck(\"https-health-check\", {\n name: 
\"https-health-check\",\n description: \"Health check via https\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n unhealthyThreshold: 5,\n httpsHealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n host: \"1.2.3.4\",\n requestPath: \"/mypath\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttps_health_check = gcp.compute.HealthCheck(\"https-health-check\",\n name=\"https-health-check\",\n description=\"Health check via https\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n https_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"host\": \"1.2.3.4\",\n \"request_path\": \"/mypath\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var https_health_check = new Gcp.Compute.HealthCheck(\"https-health-check\", new()\n {\n Name = \"https-health-check\",\n Description = \"Health check via https\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n HttpsHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpsHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Host = \"1.2.3.4\",\n RequestPath = \"/mypath\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"https-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"https-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via https\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tHttpsHealthCheck: \u0026compute.HealthCheckHttpsHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tHost: pulumi.String(\"1.2.3.4\"),\n\t\t\t\tRequestPath: pulumi.String(\"/mypath\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttpsHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var https_health_check = new HealthCheck(\"https-health-check\", HealthCheckArgs.builder()\n .name(\"https-health-check\")\n .description(\"Health check via https\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n 
.httpsHealthCheck(HealthCheckHttpsHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .host(\"1.2.3.4\")\n .requestPath(\"/mypath\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n https-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: https-health-check\n description: Health check via https\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n httpsHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n host: 1.2.3.4\n requestPath: /mypath\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Http2\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst http2_health_check = new gcp.compute.HealthCheck(\"http2-health-check\", {\n name: \"http2-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n http2HealthCheck: {\n port: 443,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttp2_health_check = gcp.compute.HealthCheck(\"http2-health-check\",\n name=\"http2-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n http2_health_check={\n \"port\": 443,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var http2_health_check = new Gcp.Compute.HealthCheck(\"http2-health-check\", new()\n {\n Name = \"http2-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n Http2HealthCheck = new Gcp.Compute.Inputs.HealthCheckHttp2HealthCheckArgs\n {\n Port = 443,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"http2-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"http2-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHttp2HealthCheck: \u0026compute.HealthCheckHttp2HealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(443),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttp2HealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var http2_health_check = new HealthCheck(\"http2-health-check\", HealthCheckArgs.builder()\n .name(\"http2-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .http2HealthCheck(HealthCheckHttp2HealthCheckArgs.builder()\n .port(\"443\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n http2-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: http2-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n http2HealthCheck:\n port: '443'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Http2 
Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst http2_health_check = new gcp.compute.HealthCheck(\"http2-health-check\", {\n name: \"http2-health-check\",\n description: \"Health check via http2\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n unhealthyThreshold: 5,\n http2HealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n host: \"1.2.3.4\",\n requestPath: \"/mypath\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttp2_health_check = gcp.compute.HealthCheck(\"http2-health-check\",\n name=\"http2-health-check\",\n description=\"Health check via http2\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n http2_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"host\": \"1.2.3.4\",\n \"request_path\": \"/mypath\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var http2_health_check = new Gcp.Compute.HealthCheck(\"http2-health-check\", new()\n {\n Name = \"http2-health-check\",\n Description = \"Health check via http2\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n Http2HealthCheck = new Gcp.Compute.Inputs.HealthCheckHttp2HealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Host = \"1.2.3.4\",\n RequestPath = \"/mypath\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"http2-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"http2-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via http2\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tHttp2HealthCheck: \u0026compute.HealthCheckHttp2HealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tHost: pulumi.String(\"1.2.3.4\"),\n\t\t\t\tRequestPath: pulumi.String(\"/mypath\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttp2HealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var http2_health_check = new HealthCheck(\"http2-health-check\", 
HealthCheckArgs.builder()\n .name(\"http2-health-check\")\n .description(\"Health check via http2\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n .http2HealthCheck(HealthCheckHttp2HealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .host(\"1.2.3.4\")\n .requestPath(\"/mypath\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n http2-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: http2-health-check\n description: Health check via http2\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n http2HealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n host: 1.2.3.4\n requestPath: /mypath\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Grpc\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst grpc_health_check = new gcp.compute.HealthCheck(\"grpc-health-check\", {\n name: \"grpc-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n grpcHealthCheck: {\n port: 443,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ngrpc_health_check = gcp.compute.HealthCheck(\"grpc-health-check\",\n name=\"grpc-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n grpc_health_check={\n \"port\": 443,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var grpc_health_check = new Gcp.Compute.HealthCheck(\"grpc-health-check\", new()\n {\n Name = \"grpc-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n GrpcHealthCheck = new Gcp.Compute.Inputs.HealthCheckGrpcHealthCheckArgs\n {\n Port = 443,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"grpc-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"grpc-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tGrpcHealthCheck: \u0026compute.HealthCheckGrpcHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(443),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckGrpcHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var grpc_health_check = new HealthCheck(\"grpc-health-check\", HealthCheckArgs.builder()\n .name(\"grpc-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .grpcHealthCheck(HealthCheckGrpcHealthCheckArgs.builder()\n .port(\"443\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n grpc-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: 
grpc-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n grpcHealthCheck:\n port: '443'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Grpc Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst grpc_health_check = new gcp.compute.HealthCheck(\"grpc-health-check\", {\n name: \"grpc-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n grpcHealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n grpcServiceName: \"testservice\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ngrpc_health_check = gcp.compute.HealthCheck(\"grpc-health-check\",\n name=\"grpc-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n grpc_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"grpc_service_name\": \"testservice\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var grpc_health_check = new Gcp.Compute.HealthCheck(\"grpc-health-check\", new()\n {\n Name = \"grpc-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n GrpcHealthCheck = new Gcp.Compute.Inputs.HealthCheckGrpcHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n GrpcServiceName = \"testservice\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"grpc-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"grpc-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tGrpcHealthCheck: \u0026compute.HealthCheckGrpcHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tGrpcServiceName: pulumi.String(\"testservice\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckGrpcHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var grpc_health_check = new HealthCheck(\"grpc-health-check\", HealthCheckArgs.builder()\n .name(\"grpc-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .grpcHealthCheck(HealthCheckGrpcHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .grpcServiceName(\"testservice\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n grpc-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: grpc-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n grpcHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n grpcServiceName: testservice\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health 
Check With Logging\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst health_check_with_logging = new gcp.compute.HealthCheck(\"health-check-with-logging\", {\n name: \"tcp-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n tcpHealthCheck: {\n port: 22,\n },\n logConfig: {\n enable: true,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhealth_check_with_logging = gcp.compute.HealthCheck(\"health-check-with-logging\",\n name=\"tcp-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n tcp_health_check={\n \"port\": 22,\n },\n log_config={\n \"enable\": True,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var health_check_with_logging = new Gcp.Compute.HealthCheck(\"health-check-with-logging\", new()\n {\n Name = \"tcp-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n TcpHealthCheck = new Gcp.Compute.Inputs.HealthCheckTcpHealthCheckArgs\n {\n Port = 22,\n },\n LogConfig = new Gcp.Compute.Inputs.HealthCheckLogConfigArgs\n {\n Enable = true,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"health-check-with-logging\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"tcp-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tTcpHealthCheck: \u0026compute.HealthCheckTcpHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(22),\n\t\t\t},\n\t\t\tLogConfig: \u0026compute.HealthCheckLogConfigArgs{\n\t\t\t\tEnable: pulumi.Bool(true),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckTcpHealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckLogConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var health_check_with_logging = new HealthCheck(\"health-check-with-logging\", HealthCheckArgs.builder()\n .name(\"tcp-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .tcpHealthCheck(HealthCheckTcpHealthCheckArgs.builder()\n .port(\"22\")\n .build())\n .logConfig(HealthCheckLogConfigArgs.builder()\n .enable(true)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n health-check-with-logging:\n type: gcp:compute:HealthCheck\n properties:\n name: tcp-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n tcpHealthCheck:\n port: '22'\n logConfig:\n enable: true\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nHealthCheck can be imported using any of these accepted formats:\n\n* `projects/{{project}}/global/healthChecks/{{name}}`\n\n* `{{project}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, HealthCheck can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:compute/healthCheck:HealthCheck default projects/{{project}}/global/healthChecks/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/healthCheck:HealthCheck default {{project}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/healthCheck:HealthCheck default {{name}}\n```\n\n", + "description": "Health Checks determine whether instances are responsive and able to do work.\nThey are an important part of a comprehensive load balancing configuration,\nas they enable monitoring instances behind load balancers.\n\nHealth Checks poll instances at a specified interval. Instances that\ndo not respond successfully to some number of probes in a row are marked\nas unhealthy. No new connections are sent to unhealthy instances,\nthough existing connections will continue. The health check will\ncontinue to poll unhealthy instances. If an instance later responds\nsuccessfully to some number of consecutive probes, it is marked\nhealthy again and can receive new connections.\n\n~\u003e**NOTE**: Legacy HTTP(S) health checks must be used for target pool-based network\nload balancers. See the [official guide](https://cloud.google.com/load-balancing/docs/health-check-concepts#selecting_hc)\nfor choosing a type of health check.\n\n\nTo get more information about HealthCheck, see:\n\n* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/load-balancing/docs/health-checks)\n\n## Example Usage\n\n### Health Check Tcp\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst tcp_health_check = new gcp.compute.HealthCheck(\"tcp-health-check\", {\n name: \"tcp-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n tcpHealthCheck: {\n port: 80,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntcp_health_check = gcp.compute.HealthCheck(\"tcp-health-check\",\n name=\"tcp-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n tcp_health_check={\n \"port\": 80,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var tcp_health_check = new Gcp.Compute.HealthCheck(\"tcp-health-check\", new()\n {\n Name = \"tcp-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n TcpHealthCheck = new Gcp.Compute.Inputs.HealthCheckTcpHealthCheckArgs\n {\n Port = 80,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"tcp-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"tcp-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tTcpHealthCheck: \u0026compute.HealthCheckTcpHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(80),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckTcpHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport 
java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var tcp_health_check = new HealthCheck(\"tcp-health-check\", HealthCheckArgs.builder()\n .name(\"tcp-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .tcpHealthCheck(HealthCheckTcpHealthCheckArgs.builder()\n .port(\"80\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n tcp-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: tcp-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n tcpHealthCheck:\n port: '80'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Tcp Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst tcp_health_check = new gcp.compute.HealthCheck(\"tcp-health-check\", {\n name: \"tcp-health-check\",\n description: \"Health check via tcp\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n unhealthyThreshold: 5,\n tcpHealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n request: \"ARE YOU HEALTHY?\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntcp_health_check = gcp.compute.HealthCheck(\"tcp-health-check\",\n name=\"tcp-health-check\",\n description=\"Health check via tcp\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n tcp_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"request\": \"ARE YOU HEALTHY?\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var tcp_health_check = new Gcp.Compute.HealthCheck(\"tcp-health-check\", new()\n {\n Name = \"tcp-health-check\",\n Description = \"Health check via tcp\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n TcpHealthCheck = new Gcp.Compute.Inputs.HealthCheckTcpHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Request = \"ARE YOU HEALTHY?\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"tcp-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"tcp-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via tcp\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tTcpHealthCheck: \u0026compute.HealthCheckTcpHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tRequest: pulumi.String(\"ARE YOU HEALTHY?\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage 
generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckTcpHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var tcp_health_check = new HealthCheck(\"tcp-health-check\", HealthCheckArgs.builder()\n .name(\"tcp-health-check\")\n .description(\"Health check via tcp\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n .tcpHealthCheck(HealthCheckTcpHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .request(\"ARE YOU HEALTHY?\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n tcp-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: tcp-health-check\n description: Health check via tcp\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n tcpHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n request: ARE YOU HEALTHY?\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Ssl\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst ssl_health_check = new gcp.compute.HealthCheck(\"ssl-health-check\", {\n name: \"ssl-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n sslHealthCheck: {\n port: 443,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nssl_health_check = gcp.compute.HealthCheck(\"ssl-health-check\",\n name=\"ssl-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n ssl_health_check={\n \"port\": 443,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var ssl_health_check = new Gcp.Compute.HealthCheck(\"ssl-health-check\", new()\n {\n Name = \"ssl-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n SslHealthCheck = new Gcp.Compute.Inputs.HealthCheckSslHealthCheckArgs\n {\n Port = 443,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"ssl-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"ssl-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tSslHealthCheck: \u0026compute.HealthCheckSslHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(443),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckSslHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport 
java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var ssl_health_check = new HealthCheck(\"ssl-health-check\", HealthCheckArgs.builder()\n .name(\"ssl-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .sslHealthCheck(HealthCheckSslHealthCheckArgs.builder()\n .port(\"443\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n ssl-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: ssl-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n sslHealthCheck:\n port: '443'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Ssl Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst ssl_health_check = new gcp.compute.HealthCheck(\"ssl-health-check\", {\n name: \"ssl-health-check\",\n description: \"Health check via ssl\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n unhealthyThreshold: 5,\n sslHealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n request: \"ARE YOU HEALTHY?\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nssl_health_check = gcp.compute.HealthCheck(\"ssl-health-check\",\n name=\"ssl-health-check\",\n description=\"Health check via ssl\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n ssl_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"request\": \"ARE YOU HEALTHY?\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var ssl_health_check = new Gcp.Compute.HealthCheck(\"ssl-health-check\", new()\n {\n Name = \"ssl-health-check\",\n Description = \"Health check via ssl\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n SslHealthCheck = new Gcp.Compute.Inputs.HealthCheckSslHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Request = \"ARE YOU HEALTHY?\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"ssl-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"ssl-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via ssl\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tSslHealthCheck: \u0026compute.HealthCheckSslHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tRequest: pulumi.String(\"ARE YOU HEALTHY?\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport 
com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckSslHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var ssl_health_check = new HealthCheck(\"ssl-health-check\", HealthCheckArgs.builder()\n .name(\"ssl-health-check\")\n .description(\"Health check via ssl\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n .sslHealthCheck(HealthCheckSslHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .request(\"ARE YOU HEALTHY?\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n ssl-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: ssl-health-check\n description: Health check via ssl\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n sslHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n request: ARE YOU HEALTHY?\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Http\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst http_health_check = new gcp.compute.HealthCheck(\"http-health-check\", {\n name: \"http-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n httpHealthCheck: {\n port: 80,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttp_health_check = gcp.compute.HealthCheck(\"http-health-check\",\n name=\"http-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n http_health_check={\n \"port\": 80,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var http_health_check = new Gcp.Compute.HealthCheck(\"http-health-check\", new()\n {\n Name = \"http-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HttpHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpHealthCheckArgs\n {\n Port = 80,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"http-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"http-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHttpHealthCheck: \u0026compute.HealthCheckHttpHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(80),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttpHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport 
java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var http_health_check = new HealthCheck(\"http-health-check\", HealthCheckArgs.builder()\n .name(\"http-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .httpHealthCheck(HealthCheckHttpHealthCheckArgs.builder()\n .port(80)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n http-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: http-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n httpHealthCheck:\n port: 80\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Http Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst http_health_check = new gcp.compute.HealthCheck(\"http-health-check\", {\n name: \"http-health-check\",\n description: \"Health check via http\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n unhealthyThreshold: 5,\n httpHealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n host: \"1.2.3.4\",\n requestPath: \"/mypath\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttp_health_check = gcp.compute.HealthCheck(\"http-health-check\",\n name=\"http-health-check\",\n description=\"Health check via http\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n http_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"host\": \"1.2.3.4\",\n \"request_path\": \"/mypath\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var http_health_check = new Gcp.Compute.HealthCheck(\"http-health-check\", new()\n {\n Name = \"http-health-check\",\n Description = \"Health check via http\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n HttpHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Host = \"1.2.3.4\",\n RequestPath = \"/mypath\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"http-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"http-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via http\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tHttpHealthCheck: \u0026compute.HealthCheckHttpHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tHost: pulumi.String(\"1.2.3.4\"),\n\t\t\t\tRequestPath: pulumi.String(\"/mypath\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn 
nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttpHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var http_health_check = new HealthCheck(\"http-health-check\", HealthCheckArgs.builder()\n .name(\"http-health-check\")\n .description(\"Health check via http\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n .httpHealthCheck(HealthCheckHttpHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .host(\"1.2.3.4\")\n .requestPath(\"/mypath\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n http-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: http-health-check\n description: Health check via http\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n httpHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n host: 1.2.3.4\n requestPath: /mypath\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Https\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst https_health_check = new gcp.compute.HealthCheck(\"https-health-check\", {\n name: \"https-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n httpsHealthCheck: {\n port: 443,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttps_health_check = gcp.compute.HealthCheck(\"https-health-check\",\n name=\"https-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n https_health_check={\n \"port\": 443,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var https_health_check = new Gcp.Compute.HealthCheck(\"https-health-check\", new()\n {\n Name = \"https-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HttpsHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpsHealthCheckArgs\n {\n Port = 443,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"https-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"https-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHttpsHealthCheck: \u0026compute.HealthCheckHttpsHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(443),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport 
com.pulumi.gcp.compute.inputs.HealthCheckHttpsHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var https_health_check = new HealthCheck(\"https-health-check\", HealthCheckArgs.builder()\n .name(\"https-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .httpsHealthCheck(HealthCheckHttpsHealthCheckArgs.builder()\n .port(\"443\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n https-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: https-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n httpsHealthCheck:\n port: '443'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Https Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst https_health_check = new gcp.compute.HealthCheck(\"https-health-check\", {\n name: \"https-health-check\",\n description: \"Health check via https\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n unhealthyThreshold: 5,\n httpsHealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n host: \"1.2.3.4\",\n requestPath: \"/mypath\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttps_health_check = gcp.compute.HealthCheck(\"https-health-check\",\n name=\"https-health-check\",\n description=\"Health check via https\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n https_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"host\": \"1.2.3.4\",\n \"request_path\": \"/mypath\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var https_health_check = new Gcp.Compute.HealthCheck(\"https-health-check\", new()\n {\n Name = \"https-health-check\",\n Description = \"Health check via https\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n HttpsHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpsHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Host = \"1.2.3.4\",\n RequestPath = \"/mypath\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"https-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"https-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via https\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tHttpsHealthCheck: \u0026compute.HealthCheckHttpsHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tHost: 
pulumi.String(\"1.2.3.4\"),\n\t\t\t\tRequestPath: pulumi.String(\"/mypath\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttpsHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var https_health_check = new HealthCheck(\"https-health-check\", HealthCheckArgs.builder()\n .name(\"https-health-check\")\n .description(\"Health check via https\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n .httpsHealthCheck(HealthCheckHttpsHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .host(\"1.2.3.4\")\n .requestPath(\"/mypath\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n https-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: https-health-check\n description: Health check via https\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n httpsHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n host: 1.2.3.4\n requestPath: /mypath\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Http2\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst http2_health_check = new gcp.compute.HealthCheck(\"http2-health-check\", {\n name: \"http2-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n http2HealthCheck: {\n port: 443,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttp2_health_check = gcp.compute.HealthCheck(\"http2-health-check\",\n name=\"http2-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n http2_health_check={\n \"port\": 443,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var http2_health_check = new Gcp.Compute.HealthCheck(\"http2-health-check\", new()\n {\n Name = \"http2-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n Http2HealthCheck = new Gcp.Compute.Inputs.HealthCheckHttp2HealthCheckArgs\n {\n Port = 443,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"http2-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"http2-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHttp2HealthCheck: \u0026compute.HealthCheckHttp2HealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(443),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage 
generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttp2HealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var http2_health_check = new HealthCheck(\"http2-health-check\", HealthCheckArgs.builder()\n .name(\"http2-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .http2HealthCheck(HealthCheckHttp2HealthCheckArgs.builder()\n .port(\"443\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n http2-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: http2-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n http2HealthCheck:\n port: '443'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Http2 Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst http2_health_check = new gcp.compute.HealthCheck(\"http2-health-check\", {\n name: \"http2-health-check\",\n description: \"Health check via http2\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n healthyThreshold: 4,\n unhealthyThreshold: 5,\n http2HealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n host: \"1.2.3.4\",\n requestPath: \"/mypath\",\n proxyHeader: \"NONE\",\n response: \"I AM HEALTHY\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttp2_health_check = gcp.compute.HealthCheck(\"http2-health-check\",\n name=\"http2-health-check\",\n description=\"Health check via http2\",\n timeout_sec=1,\n check_interval_sec=1,\n healthy_threshold=4,\n unhealthy_threshold=5,\n http2_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"host\": \"1.2.3.4\",\n \"request_path\": \"/mypath\",\n \"proxy_header\": \"NONE\",\n \"response\": \"I AM HEALTHY\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var http2_health_check = new Gcp.Compute.HealthCheck(\"http2-health-check\", new()\n {\n Name = \"http2-health-check\",\n Description = \"Health check via http2\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n HealthyThreshold = 4,\n UnhealthyThreshold = 5,\n Http2HealthCheck = new Gcp.Compute.Inputs.HealthCheckHttp2HealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n Host = \"1.2.3.4\",\n RequestPath = \"/mypath\",\n ProxyHeader = \"NONE\",\n Response = \"I AM HEALTHY\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"http2-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"http2-health-check\"),\n\t\t\tDescription: pulumi.String(\"Health check via http2\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tHealthyThreshold: pulumi.Int(4),\n\t\t\tUnhealthyThreshold: pulumi.Int(5),\n\t\t\tHttp2HealthCheck: 
\u0026compute.HealthCheckHttp2HealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tHost: pulumi.String(\"1.2.3.4\"),\n\t\t\t\tRequestPath: pulumi.String(\"/mypath\"),\n\t\t\t\tProxyHeader: pulumi.String(\"NONE\"),\n\t\t\t\tResponse: pulumi.String(\"I AM HEALTHY\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttp2HealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var http2_health_check = new HealthCheck(\"http2-health-check\", HealthCheckArgs.builder()\n .name(\"http2-health-check\")\n .description(\"Health check via http2\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .healthyThreshold(4)\n .unhealthyThreshold(5)\n .http2HealthCheck(HealthCheckHttp2HealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .host(\"1.2.3.4\")\n .requestPath(\"/mypath\")\n .proxyHeader(\"NONE\")\n .response(\"I AM HEALTHY\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n http2-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: http2-health-check\n description: Health check via http2\n timeoutSec: 1\n checkIntervalSec: 1\n healthyThreshold: 4\n unhealthyThreshold: 5\n http2HealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n host: 1.2.3.4\n requestPath: /mypath\n proxyHeader: NONE\n response: I AM HEALTHY\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Grpc\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst grpc_health_check = new gcp.compute.HealthCheck(\"grpc-health-check\", {\n name: \"grpc-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n grpcHealthCheck: {\n port: 443,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ngrpc_health_check = gcp.compute.HealthCheck(\"grpc-health-check\",\n name=\"grpc-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n grpc_health_check={\n \"port\": 443,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var grpc_health_check = new Gcp.Compute.HealthCheck(\"grpc-health-check\", new()\n {\n Name = \"grpc-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n GrpcHealthCheck = new Gcp.Compute.Inputs.HealthCheckGrpcHealthCheckArgs\n {\n Port = 443,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"grpc-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"grpc-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tGrpcHealthCheck: 
\u0026compute.HealthCheckGrpcHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(443),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckGrpcHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var grpc_health_check = new HealthCheck(\"grpc-health-check\", HealthCheckArgs.builder()\n .name(\"grpc-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .grpcHealthCheck(HealthCheckGrpcHealthCheckArgs.builder()\n .port(\"443\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n grpc-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: grpc-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n grpcHealthCheck:\n port: '443'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check Grpc Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst grpc_health_check = new gcp.compute.HealthCheck(\"grpc-health-check\", {\n name: \"grpc-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n grpcHealthCheck: {\n portName: \"health-check-port\",\n portSpecification: \"USE_NAMED_PORT\",\n grpcServiceName: \"testservice\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ngrpc_health_check = gcp.compute.HealthCheck(\"grpc-health-check\",\n name=\"grpc-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n grpc_health_check={\n \"port_name\": \"health-check-port\",\n \"port_specification\": \"USE_NAMED_PORT\",\n \"grpc_service_name\": \"testservice\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var grpc_health_check = new Gcp.Compute.HealthCheck(\"grpc-health-check\", new()\n {\n Name = \"grpc-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n GrpcHealthCheck = new Gcp.Compute.Inputs.HealthCheckGrpcHealthCheckArgs\n {\n PortName = \"health-check-port\",\n PortSpecification = \"USE_NAMED_PORT\",\n GrpcServiceName = \"testservice\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"grpc-health-check\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"grpc-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tGrpcHealthCheck: \u0026compute.HealthCheckGrpcHealthCheckArgs{\n\t\t\t\tPortName: pulumi.String(\"health-check-port\"),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_NAMED_PORT\"),\n\t\t\t\tGrpcServiceName: pulumi.String(\"testservice\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport 
com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckGrpcHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var grpc_health_check = new HealthCheck(\"grpc-health-check\", HealthCheckArgs.builder()\n .name(\"grpc-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .grpcHealthCheck(HealthCheckGrpcHealthCheckArgs.builder()\n .portName(\"health-check-port\")\n .portSpecification(\"USE_NAMED_PORT\")\n .grpcServiceName(\"testservice\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n grpc-health-check:\n type: gcp:compute:HealthCheck\n properties:\n name: grpc-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n grpcHealthCheck:\n portName: health-check-port\n portSpecification: USE_NAMED_PORT\n grpcServiceName: testservice\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Health Check With Logging\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst health_check_with_logging = new gcp.compute.HealthCheck(\"health-check-with-logging\", {\n name: \"tcp-health-check\",\n timeoutSec: 1,\n checkIntervalSec: 1,\n tcpHealthCheck: {\n port: 22,\n },\n logConfig: {\n enable: true,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhealth_check_with_logging = gcp.compute.HealthCheck(\"health-check-with-logging\",\n name=\"tcp-health-check\",\n timeout_sec=1,\n check_interval_sec=1,\n tcp_health_check={\n \"port\": 22,\n },\n log_config={\n \"enable\": True,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var health_check_with_logging = new Gcp.Compute.HealthCheck(\"health-check-with-logging\", new()\n {\n Name = \"tcp-health-check\",\n TimeoutSec = 1,\n CheckIntervalSec = 1,\n TcpHealthCheck = new Gcp.Compute.Inputs.HealthCheckTcpHealthCheckArgs\n {\n Port = 22,\n },\n LogConfig = new Gcp.Compute.Inputs.HealthCheckLogConfigArgs\n {\n Enable = true,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"health-check-with-logging\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"tcp-health-check\"),\n\t\t\tTimeoutSec: pulumi.Int(1),\n\t\t\tCheckIntervalSec: pulumi.Int(1),\n\t\t\tTcpHealthCheck: \u0026compute.HealthCheckTcpHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(22),\n\t\t\t},\n\t\t\tLogConfig: \u0026compute.HealthCheckLogConfigArgs{\n\t\t\t\tEnable: pulumi.Bool(true),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckTcpHealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckLogConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport 
java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var health_check_with_logging = new HealthCheck(\"health-check-with-logging\", HealthCheckArgs.builder()\n .name(\"tcp-health-check\")\n .timeoutSec(1)\n .checkIntervalSec(1)\n .tcpHealthCheck(HealthCheckTcpHealthCheckArgs.builder()\n .port(\"22\")\n .build())\n .logConfig(HealthCheckLogConfigArgs.builder()\n .enable(true)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n health-check-with-logging:\n type: gcp:compute:HealthCheck\n properties:\n name: tcp-health-check\n timeoutSec: 1\n checkIntervalSec: 1\n tcpHealthCheck:\n port: '22'\n logConfig:\n enable: true\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Compute Health Check Http Source Regions\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst http_health_check_with_source_regions = new gcp.compute.HealthCheck(\"http-health-check-with-source-regions\", {\n name: \"http-health-check\",\n checkIntervalSec: 30,\n httpHealthCheck: {\n port: 80,\n portSpecification: \"USE_FIXED_PORT\",\n },\n sourceRegions: [\n \"us-west1\",\n \"us-central1\",\n \"us-east5\",\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttp_health_check_with_source_regions = gcp.compute.HealthCheck(\"http-health-check-with-source-regions\",\n name=\"http-health-check\",\n check_interval_sec=30,\n http_health_check={\n \"port\": 80,\n \"port_specification\": \"USE_FIXED_PORT\",\n },\n source_regions=[\n \"us-west1\",\n \"us-central1\",\n \"us-east5\",\n ])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var http_health_check_with_source_regions = new Gcp.Compute.HealthCheck(\"http-health-check-with-source-regions\", new()\n {\n Name = \"http-health-check\",\n CheckIntervalSec = 30,\n HttpHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpHealthCheckArgs\n {\n Port = 80,\n PortSpecification = \"USE_FIXED_PORT\",\n },\n SourceRegions = new[]\n {\n \"us-west1\",\n \"us-central1\",\n \"us-east5\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"http-health-check-with-source-regions\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"http-health-check\"),\n\t\t\tCheckIntervalSec: pulumi.Int(30),\n\t\t\tHttpHealthCheck: \u0026compute.HealthCheckHttpHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(80),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_FIXED_PORT\"),\n\t\t\t},\n\t\t\tSourceRegions: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"us-west1\"),\n\t\t\t\tpulumi.String(\"us-central1\"),\n\t\t\t\tpulumi.String(\"us-east5\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttpHealthCheckArgs;\nimport java.util.List;\nimport 
java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var http_health_check_with_source_regions = new HealthCheck(\"http-health-check-with-source-regions\", HealthCheckArgs.builder()\n .name(\"http-health-check\")\n .checkIntervalSec(30)\n .httpHealthCheck(HealthCheckHttpHealthCheckArgs.builder()\n .port(80)\n .portSpecification(\"USE_FIXED_PORT\")\n .build())\n .sourceRegions( \n \"us-west1\",\n \"us-central1\",\n \"us-east5\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n http-health-check-with-source-regions:\n type: gcp:compute:HealthCheck\n properties:\n name: http-health-check\n checkIntervalSec: 30\n httpHealthCheck:\n port: 80\n portSpecification: USE_FIXED_PORT\n sourceRegions:\n - us-west1\n - us-central1\n - us-east5\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Compute Health Check Https Source Regions\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst https_health_check_with_source_regions = new gcp.compute.HealthCheck(\"https-health-check-with-source-regions\", {\n name: \"https-health-check\",\n checkIntervalSec: 30,\n httpsHealthCheck: {\n port: 80,\n portSpecification: \"USE_FIXED_PORT\",\n },\n sourceRegions: [\n \"us-west1\",\n \"us-central1\",\n \"us-east5\",\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nhttps_health_check_with_source_regions = gcp.compute.HealthCheck(\"https-health-check-with-source-regions\",\n name=\"https-health-check\",\n check_interval_sec=30,\n https_health_check={\n \"port\": 80,\n \"port_specification\": \"USE_FIXED_PORT\",\n },\n source_regions=[\n \"us-west1\",\n \"us-central1\",\n \"us-east5\",\n ])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var https_health_check_with_source_regions = new Gcp.Compute.HealthCheck(\"https-health-check-with-source-regions\", new()\n {\n Name = \"https-health-check\",\n CheckIntervalSec = 30,\n HttpsHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpsHealthCheckArgs\n {\n Port = 80,\n PortSpecification = \"USE_FIXED_PORT\",\n },\n SourceRegions = new[]\n {\n \"us-west1\",\n \"us-central1\",\n \"us-east5\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"https-health-check-with-source-regions\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"https-health-check\"),\n\t\t\tCheckIntervalSec: pulumi.Int(30),\n\t\t\tHttpsHealthCheck: \u0026compute.HealthCheckHttpsHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(80),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_FIXED_PORT\"),\n\t\t\t},\n\t\t\tSourceRegions: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"us-west1\"),\n\t\t\t\tpulumi.String(\"us-central1\"),\n\t\t\t\tpulumi.String(\"us-east5\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport 
com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckHttpsHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var https_health_check_with_source_regions = new HealthCheck(\"https-health-check-with-source-regions\", HealthCheckArgs.builder()\n .name(\"https-health-check\")\n .checkIntervalSec(30)\n .httpsHealthCheck(HealthCheckHttpsHealthCheckArgs.builder()\n .port(80)\n .portSpecification(\"USE_FIXED_PORT\")\n .build())\n .sourceRegions( \n \"us-west1\",\n \"us-central1\",\n \"us-east5\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n https-health-check-with-source-regions:\n type: gcp:compute:HealthCheck\n properties:\n name: https-health-check\n checkIntervalSec: 30\n httpsHealthCheck:\n port: 80\n portSpecification: USE_FIXED_PORT\n sourceRegions:\n - us-west1\n - us-central1\n - us-east5\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Compute Health Check Tcp Source Regions\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst tcp_health_check_with_source_regions = new gcp.compute.HealthCheck(\"tcp-health-check-with-source-regions\", {\n name: \"tcp-health-check\",\n checkIntervalSec: 30,\n tcpHealthCheck: {\n port: 80,\n portSpecification: \"USE_FIXED_PORT\",\n },\n sourceRegions: [\n \"us-west1\",\n \"us-central1\",\n \"us-east5\",\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntcp_health_check_with_source_regions = gcp.compute.HealthCheck(\"tcp-health-check-with-source-regions\",\n name=\"tcp-health-check\",\n check_interval_sec=30,\n tcp_health_check={\n \"port\": 80,\n \"port_specification\": \"USE_FIXED_PORT\",\n },\n source_regions=[\n \"us-west1\",\n \"us-central1\",\n \"us-east5\",\n ])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var tcp_health_check_with_source_regions = new Gcp.Compute.HealthCheck(\"tcp-health-check-with-source-regions\", new()\n {\n Name = \"tcp-health-check\",\n CheckIntervalSec = 30,\n TcpHealthCheck = new Gcp.Compute.Inputs.HealthCheckTcpHealthCheckArgs\n {\n Port = 80,\n PortSpecification = \"USE_FIXED_PORT\",\n },\n SourceRegions = new[]\n {\n \"us-west1\",\n \"us-central1\",\n \"us-east5\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewHealthCheck(ctx, \"tcp-health-check-with-source-regions\", \u0026compute.HealthCheckArgs{\n\t\t\tName: pulumi.String(\"tcp-health-check\"),\n\t\t\tCheckIntervalSec: pulumi.Int(30),\n\t\t\tTcpHealthCheck: \u0026compute.HealthCheckTcpHealthCheckArgs{\n\t\t\t\tPort: pulumi.Int(80),\n\t\t\t\tPortSpecification: pulumi.String(\"USE_FIXED_PORT\"),\n\t\t\t},\n\t\t\tSourceRegions: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"us-west1\"),\n\t\t\t\tpulumi.String(\"us-central1\"),\n\t\t\t\tpulumi.String(\"us-east5\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport 
com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.HealthCheck;\nimport com.pulumi.gcp.compute.HealthCheckArgs;\nimport com.pulumi.gcp.compute.inputs.HealthCheckTcpHealthCheckArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var tcp_health_check_with_source_regions = new HealthCheck(\"tcp-health-check-with-source-regions\", HealthCheckArgs.builder()\n .name(\"tcp-health-check\")\n .checkIntervalSec(30)\n .tcpHealthCheck(HealthCheckTcpHealthCheckArgs.builder()\n .port(80)\n .portSpecification(\"USE_FIXED_PORT\")\n .build())\n .sourceRegions( \n \"us-west1\",\n \"us-central1\",\n \"us-east5\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n tcp-health-check-with-source-regions:\n type: gcp:compute:HealthCheck\n properties:\n name: tcp-health-check\n checkIntervalSec: 30\n tcpHealthCheck:\n port: 80\n portSpecification: USE_FIXED_PORT\n sourceRegions:\n - us-west1\n - us-central1\n - us-east5\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nHealthCheck can be imported using any of these accepted formats:\n\n* `projects/{{project}}/global/healthChecks/{{name}}`\n\n* `{{project}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, HealthCheck can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:compute/healthCheck:HealthCheck default projects/{{project}}/global/healthChecks/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/healthCheck:HealthCheck default {{project}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/healthCheck:HealthCheck default {{name}}\n```\n\n", "properties": { "checkIntervalSec": { "type": "integer", @@ -149220,7 +150765,7 @@ } }, "gcp:compute/instance:Instance": { - "description": "Manages a VM instance resource within GCE. 
For more information see\n[the official documentation](https://cloud.google.com/compute/docs/instances)\nand\n[API](https://cloud.google.com/compute/docs/reference/latest/instances).\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.serviceaccount.Account(\"default\", {\n accountId: \"my-custom-sa\",\n displayName: \"Custom SA for VM Instance\",\n});\nconst defaultInstance = new gcp.compute.Instance(\"default\", {\n networkInterfaces: [{\n accessConfigs: [{}],\n network: \"default\",\n }],\n name: \"my-instance\",\n machineType: \"n2-standard-2\",\n zone: \"us-central1-a\",\n tags: [\n \"foo\",\n \"bar\",\n ],\n bootDisk: {\n initializeParams: {\n image: \"debian-cloud/debian-11\",\n labels: {\n my_label: \"value\",\n },\n },\n },\n scratchDisks: [{\n \"interface\": \"NVME\",\n }],\n metadata: {\n foo: \"bar\",\n },\n metadataStartupScript: \"echo hi \u003e /test.txt\",\n serviceAccount: {\n email: _default.email,\n scopes: [\"cloud-platform\"],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.serviceaccount.Account(\"default\",\n account_id=\"my-custom-sa\",\n display_name=\"Custom SA for VM Instance\")\ndefault_instance = gcp.compute.Instance(\"default\",\n network_interfaces=[{\n \"access_configs\": [{}],\n \"network\": \"default\",\n }],\n name=\"my-instance\",\n machine_type=\"n2-standard-2\",\n zone=\"us-central1-a\",\n tags=[\n \"foo\",\n \"bar\",\n ],\n boot_disk={\n \"initialize_params\": {\n \"image\": \"debian-cloud/debian-11\",\n \"labels\": {\n \"my_label\": \"value\",\n },\n },\n },\n scratch_disks=[{\n \"interface\": \"NVME\",\n }],\n metadata={\n \"foo\": \"bar\",\n },\n metadata_startup_script=\"echo hi \u003e /test.txt\",\n service_account={\n \"email\": default.email,\n \"scopes\": [\"cloud-platform\"],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.ServiceAccount.Account(\"default\", new()\n {\n AccountId = \"my-custom-sa\",\n DisplayName = \"Custom SA for VM Instance\",\n });\n\n var defaultInstance = new Gcp.Compute.Instance(\"default\", new()\n {\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceNetworkInterfaceArgs\n {\n AccessConfigs = new[]\n {\n null,\n },\n Network = \"default\",\n },\n },\n Name = \"my-instance\",\n MachineType = \"n2-standard-2\",\n Zone = \"us-central1-a\",\n Tags = new[]\n {\n \"foo\",\n \"bar\",\n },\n BootDisk = new Gcp.Compute.Inputs.InstanceBootDiskArgs\n {\n InitializeParams = new Gcp.Compute.Inputs.InstanceBootDiskInitializeParamsArgs\n {\n Image = \"debian-cloud/debian-11\",\n Labels = \n {\n { \"my_label\", \"value\" },\n },\n },\n },\n ScratchDisks = new[]\n {\n new Gcp.Compute.Inputs.InstanceScratchDiskArgs\n {\n Interface = \"NVME\",\n },\n },\n Metadata = \n {\n { \"foo\", \"bar\" },\n },\n MetadataStartupScript = \"echo hi \u003e /test.txt\",\n ServiceAccount = new Gcp.Compute.Inputs.InstanceServiceAccountArgs\n {\n Email = @default.Email,\n Scopes = new[]\n {\n \"cloud-platform\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := 
serviceaccount.NewAccount(ctx, \"default\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"my-custom-sa\"),\n\t\t\tDisplayName: pulumi.String(\"Custom SA for VM Instance\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstance(ctx, \"default\", \u0026compute.InstanceArgs{\n\t\t\tNetworkInterfaces: compute.InstanceNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceNetworkInterfaceArgs{\n\t\t\t\t\tAccessConfigs: compute.InstanceNetworkInterfaceAccessConfigArray{\n\t\t\t\t\t\tnil,\n\t\t\t\t\t},\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tName: pulumi.String(\"my-instance\"),\n\t\t\tMachineType: pulumi.String(\"n2-standard-2\"),\n\t\t\tZone: pulumi.String(\"us-central1-a\"),\n\t\t\tTags: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"foo\"),\n\t\t\t\tpulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tBootDisk: \u0026compute.InstanceBootDiskArgs{\n\t\t\t\tInitializeParams: \u0026compute.InstanceBootDiskInitializeParamsArgs{\n\t\t\t\t\tImage: pulumi.String(\"debian-cloud/debian-11\"),\n\t\t\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\t\t\"my_label\": pulumi.String(\"value\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tScratchDisks: compute.InstanceScratchDiskArray{\n\t\t\t\t\u0026compute.InstanceScratchDiskArgs{\n\t\t\t\t\tInterface: pulumi.String(\"NVME\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tMetadata: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tMetadataStartupScript: pulumi.String(\"echo hi \u003e /test.txt\"),\n\t\t\tServiceAccount: \u0026compute.InstanceServiceAccountArgs{\n\t\t\t\tEmail: _default.Email,\n\t\t\t\tScopes: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"cloud-platform\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.compute.Instance;\nimport com.pulumi.gcp.compute.InstanceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceNetworkInterfaceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskInitializeParamsArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceScratchDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceServiceAccountArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Account(\"default\", AccountArgs.builder()\n .accountId(\"my-custom-sa\")\n .displayName(\"Custom SA for VM Instance\")\n .build());\n\n var defaultInstance = new Instance(\"defaultInstance\", InstanceArgs.builder()\n .networkInterfaces(InstanceNetworkInterfaceArgs.builder()\n .accessConfigs()\n .network(\"default\")\n .build())\n .name(\"my-instance\")\n .machineType(\"n2-standard-2\")\n .zone(\"us-central1-a\")\n .tags( \n \"foo\",\n \"bar\")\n .bootDisk(InstanceBootDiskArgs.builder()\n .initializeParams(InstanceBootDiskInitializeParamsArgs.builder()\n .image(\"debian-cloud/debian-11\")\n .labels(Map.of(\"my_label\", \"value\"))\n .build())\n .build())\n .scratchDisks(InstanceScratchDiskArgs.builder()\n .interface_(\"NVME\")\n .build())\n 
.metadata(Map.of(\"foo\", \"bar\"))\n .metadataStartupScript(\"echo hi \u003e /test.txt\")\n .serviceAccount(InstanceServiceAccountArgs.builder()\n .email(default_.email())\n .scopes(\"cloud-platform\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:serviceaccount:Account\n properties:\n accountId: my-custom-sa\n displayName: Custom SA for VM Instance\n defaultInstance:\n type: gcp:compute:Instance\n name: default\n properties:\n networkInterfaces:\n - accessConfigs:\n - {}\n network: default\n name: my-instance\n machineType: n2-standard-2\n zone: us-central1-a\n tags:\n - foo\n - bar\n bootDisk:\n initializeParams:\n image: debian-cloud/debian-11\n labels:\n my_label: value\n scratchDisks:\n - interface: NVME\n metadata:\n foo: bar\n metadataStartupScript: echo hi \u003e /test.txt\n serviceAccount:\n email: ${default.email}\n scopes:\n - cloud-platform\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nInstances can be imported using any of these accepted formats:\n\n* `projects/{{project}}/zones/{{zone}}/instances/{{name}}`\n\n* `{{project}}/{{zone}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, instances can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:compute/instance:Instance default projects/{{project}}/zones/{{zone}}/instances/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/instance:Instance default {{project}}/{{zone}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/instance:Instance default {{name}}\n```\n\n", + "description": "Manages a VM instance resource within GCE. For more information see\n[the official documentation](https://cloud.google.com/compute/docs/instances)\nand\n[API](https://cloud.google.com/compute/docs/reference/latest/instances).\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.serviceaccount.Account(\"default\", {\n accountId: \"my-custom-sa\",\n displayName: \"Custom SA for VM Instance\",\n});\nconst defaultInstance = new gcp.compute.Instance(\"default\", {\n networkInterfaces: [{\n accessConfigs: [{}],\n network: \"default\",\n }],\n name: \"my-instance\",\n machineType: \"n2-standard-2\",\n zone: \"us-central1-a\",\n tags: [\n \"foo\",\n \"bar\",\n ],\n bootDisk: {\n initializeParams: {\n image: \"debian-cloud/debian-11\",\n labels: {\n my_label: \"value\",\n },\n },\n },\n scratchDisks: [{\n \"interface\": \"NVME\",\n }],\n metadata: {\n foo: \"bar\",\n },\n metadataStartupScript: \"echo hi \u003e /test.txt\",\n serviceAccount: {\n email: _default.email,\n scopes: [\"cloud-platform\"],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.serviceaccount.Account(\"default\",\n account_id=\"my-custom-sa\",\n display_name=\"Custom SA for VM Instance\")\ndefault_instance = gcp.compute.Instance(\"default\",\n network_interfaces=[{\n \"access_configs\": [{}],\n \"network\": \"default\",\n }],\n name=\"my-instance\",\n machine_type=\"n2-standard-2\",\n zone=\"us-central1-a\",\n tags=[\n \"foo\",\n \"bar\",\n ],\n boot_disk={\n \"initialize_params\": {\n \"image\": \"debian-cloud/debian-11\",\n \"labels\": {\n \"my_label\": \"value\",\n },\n },\n },\n scratch_disks=[{\n \"interface\": \"NVME\",\n }],\n metadata={\n \"foo\": \"bar\",\n },\n metadata_startup_script=\"echo hi \u003e /test.txt\",\n service_account={\n \"email\": default.email,\n \"scopes\": 
[\"cloud-platform\"],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.ServiceAccount.Account(\"default\", new()\n {\n AccountId = \"my-custom-sa\",\n DisplayName = \"Custom SA for VM Instance\",\n });\n\n var defaultInstance = new Gcp.Compute.Instance(\"default\", new()\n {\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceNetworkInterfaceArgs\n {\n AccessConfigs = new[]\n {\n null,\n },\n Network = \"default\",\n },\n },\n Name = \"my-instance\",\n MachineType = \"n2-standard-2\",\n Zone = \"us-central1-a\",\n Tags = new[]\n {\n \"foo\",\n \"bar\",\n },\n BootDisk = new Gcp.Compute.Inputs.InstanceBootDiskArgs\n {\n InitializeParams = new Gcp.Compute.Inputs.InstanceBootDiskInitializeParamsArgs\n {\n Image = \"debian-cloud/debian-11\",\n Labels = \n {\n { \"my_label\", \"value\" },\n },\n },\n },\n ScratchDisks = new[]\n {\n new Gcp.Compute.Inputs.InstanceScratchDiskArgs\n {\n Interface = \"NVME\",\n },\n },\n Metadata = \n {\n { \"foo\", \"bar\" },\n },\n MetadataStartupScript = \"echo hi \u003e /test.txt\",\n ServiceAccount = new Gcp.Compute.Inputs.InstanceServiceAccountArgs\n {\n Email = @default.Email,\n Scopes = new[]\n {\n \"cloud-platform\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := serviceaccount.NewAccount(ctx, \"default\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"my-custom-sa\"),\n\t\t\tDisplayName: pulumi.String(\"Custom SA for VM Instance\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstance(ctx, \"default\", \u0026compute.InstanceArgs{\n\t\t\tNetworkInterfaces: compute.InstanceNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceNetworkInterfaceArgs{\n\t\t\t\t\tAccessConfigs: compute.InstanceNetworkInterfaceAccessConfigArray{\n\t\t\t\t\t\tnil,\n\t\t\t\t\t},\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tName: pulumi.String(\"my-instance\"),\n\t\t\tMachineType: pulumi.String(\"n2-standard-2\"),\n\t\t\tZone: pulumi.String(\"us-central1-a\"),\n\t\t\tTags: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"foo\"),\n\t\t\t\tpulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tBootDisk: \u0026compute.InstanceBootDiskArgs{\n\t\t\t\tInitializeParams: \u0026compute.InstanceBootDiskInitializeParamsArgs{\n\t\t\t\t\tImage: pulumi.String(\"debian-cloud/debian-11\"),\n\t\t\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\t\t\"my_label\": pulumi.String(\"value\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tScratchDisks: compute.InstanceScratchDiskArray{\n\t\t\t\t\u0026compute.InstanceScratchDiskArgs{\n\t\t\t\t\tInterface: pulumi.String(\"NVME\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tMetadata: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tMetadataStartupScript: pulumi.String(\"echo hi \u003e /test.txt\"),\n\t\t\tServiceAccount: \u0026compute.InstanceServiceAccountArgs{\n\t\t\t\tEmail: _default.Email,\n\t\t\t\tScopes: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"cloud-platform\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport 
com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.compute.Instance;\nimport com.pulumi.gcp.compute.InstanceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceNetworkInterfaceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskInitializeParamsArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceScratchDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceServiceAccountArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Account(\"default\", AccountArgs.builder()\n .accountId(\"my-custom-sa\")\n .displayName(\"Custom SA for VM Instance\")\n .build());\n\n var defaultInstance = new Instance(\"defaultInstance\", InstanceArgs.builder()\n .networkInterfaces(InstanceNetworkInterfaceArgs.builder()\n .accessConfigs()\n .network(\"default\")\n .build())\n .name(\"my-instance\")\n .machineType(\"n2-standard-2\")\n .zone(\"us-central1-a\")\n .tags( \n \"foo\",\n \"bar\")\n .bootDisk(InstanceBootDiskArgs.builder()\n .initializeParams(InstanceBootDiskInitializeParamsArgs.builder()\n .image(\"debian-cloud/debian-11\")\n .labels(Map.of(\"my_label\", \"value\"))\n .build())\n .build())\n .scratchDisks(InstanceScratchDiskArgs.builder()\n .interface_(\"NVME\")\n .build())\n .metadata(Map.of(\"foo\", \"bar\"))\n .metadataStartupScript(\"echo hi \u003e /test.txt\")\n .serviceAccount(InstanceServiceAccountArgs.builder()\n .email(default_.email())\n .scopes(\"cloud-platform\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:serviceaccount:Account\n properties:\n accountId: my-custom-sa\n displayName: Custom SA for VM Instance\n defaultInstance:\n type: gcp:compute:Instance\n name: default\n properties:\n networkInterfaces:\n - accessConfigs:\n - {}\n network: default\n name: my-instance\n machineType: n2-standard-2\n zone: us-central1-a\n tags:\n - foo\n - bar\n bootDisk:\n initializeParams:\n image: debian-cloud/debian-11\n labels:\n my_label: value\n scratchDisks:\n - interface: NVME\n metadata:\n foo: bar\n metadataStartupScript: echo hi \u003e /test.txt\n serviceAccount:\n email: ${default.email}\n scopes:\n - cloud-platform\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n\n### Confidential Computing\n\nExample with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated.\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.serviceaccount.Account(\"default\", {\n accountId: \"my-custom-sa\",\n displayName: \"Custom SA for VM Instance\",\n});\nconst confidentialInstance = new gcp.compute.Instance(\"confidential_instance\", {\n networkInterfaces: [{\n accessConfigs: [{}],\n network: \"default\",\n }],\n name: \"my-confidential-instance\",\n zone: \"us-central1-a\",\n machineType: \"n2d-standard-2\",\n minCpuPlatform: \"AMD Milan\",\n confidentialInstanceConfig: {\n enableConfidentialCompute: true,\n confidentialInstanceType: \"SEV\",\n },\n bootDisk: {\n initializeParams: {\n image: \"ubuntu-os-cloud/ubuntu-2004-lts\",\n labels: {\n 
my_label: \"value\",\n },\n },\n },\n scratchDisks: [{\n \"interface\": \"NVME\",\n }],\n serviceAccount: {\n email: _default.email,\n scopes: [\"cloud-platform\"],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.serviceaccount.Account(\"default\",\n account_id=\"my-custom-sa\",\n display_name=\"Custom SA for VM Instance\")\nconfidential_instance = gcp.compute.Instance(\"confidential_instance\",\n network_interfaces=[{\n \"access_configs\": [{}],\n \"network\": \"default\",\n }],\n name=\"my-confidential-instance\",\n zone=\"us-central1-a\",\n machine_type=\"n2d-standard-2\",\n min_cpu_platform=\"AMD Milan\",\n confidential_instance_config={\n \"enable_confidential_compute\": True,\n \"confidential_instance_type\": \"SEV\",\n },\n boot_disk={\n \"initialize_params\": {\n \"image\": \"ubuntu-os-cloud/ubuntu-2004-lts\",\n \"labels\": {\n \"my_label\": \"value\",\n },\n },\n },\n scratch_disks=[{\n \"interface\": \"NVME\",\n }],\n service_account={\n \"email\": default.email,\n \"scopes\": [\"cloud-platform\"],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.ServiceAccount.Account(\"default\", new()\n {\n AccountId = \"my-custom-sa\",\n DisplayName = \"Custom SA for VM Instance\",\n });\n\n var confidentialInstance = new Gcp.Compute.Instance(\"confidential_instance\", new()\n {\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceNetworkInterfaceArgs\n {\n AccessConfigs = new[]\n {\n null,\n },\n Network = \"default\",\n },\n },\n Name = \"my-confidential-instance\",\n Zone = \"us-central1-a\",\n MachineType = \"n2d-standard-2\",\n MinCpuPlatform = \"AMD Milan\",\n ConfidentialInstanceConfig = new Gcp.Compute.Inputs.InstanceConfidentialInstanceConfigArgs\n {\n EnableConfidentialCompute = true,\n ConfidentialInstanceType = \"SEV\",\n },\n BootDisk = new Gcp.Compute.Inputs.InstanceBootDiskArgs\n {\n InitializeParams = new Gcp.Compute.Inputs.InstanceBootDiskInitializeParamsArgs\n {\n Image = \"ubuntu-os-cloud/ubuntu-2004-lts\",\n Labels = \n {\n { \"my_label\", \"value\" },\n },\n },\n },\n ScratchDisks = new[]\n {\n new Gcp.Compute.Inputs.InstanceScratchDiskArgs\n {\n Interface = \"NVME\",\n },\n },\n ServiceAccount = new Gcp.Compute.Inputs.InstanceServiceAccountArgs\n {\n Email = @default.Email,\n Scopes = new[]\n {\n \"cloud-platform\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := serviceaccount.NewAccount(ctx, \"default\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"my-custom-sa\"),\n\t\t\tDisplayName: pulumi.String(\"Custom SA for VM Instance\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstance(ctx, \"confidential_instance\", \u0026compute.InstanceArgs{\n\t\t\tNetworkInterfaces: compute.InstanceNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceNetworkInterfaceArgs{\n\t\t\t\t\tAccessConfigs: compute.InstanceNetworkInterfaceAccessConfigArray{\n\t\t\t\t\t\tnil,\n\t\t\t\t\t},\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tName: pulumi.String(\"my-confidential-instance\"),\n\t\t\tZone: pulumi.String(\"us-central1-a\"),\n\t\t\tMachineType: 
pulumi.String(\"n2d-standard-2\"),\n\t\t\tMinCpuPlatform: pulumi.String(\"AMD Milan\"),\n\t\t\tConfidentialInstanceConfig: \u0026compute.InstanceConfidentialInstanceConfigArgs{\n\t\t\t\tEnableConfidentialCompute: pulumi.Bool(true),\n\t\t\t\tConfidentialInstanceType: pulumi.String(\"SEV\"),\n\t\t\t},\n\t\t\tBootDisk: \u0026compute.InstanceBootDiskArgs{\n\t\t\t\tInitializeParams: \u0026compute.InstanceBootDiskInitializeParamsArgs{\n\t\t\t\t\tImage: pulumi.String(\"ubuntu-os-cloud/ubuntu-2004-lts\"),\n\t\t\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\t\t\"my_label\": pulumi.String(\"value\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tScratchDisks: compute.InstanceScratchDiskArray{\n\t\t\t\t\u0026compute.InstanceScratchDiskArgs{\n\t\t\t\t\tInterface: pulumi.String(\"NVME\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tServiceAccount: \u0026compute.InstanceServiceAccountArgs{\n\t\t\t\tEmail: _default.Email,\n\t\t\t\tScopes: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"cloud-platform\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.compute.Instance;\nimport com.pulumi.gcp.compute.InstanceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceNetworkInterfaceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceConfidentialInstanceConfigArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskInitializeParamsArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceScratchDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceServiceAccountArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Account(\"default\", AccountArgs.builder()\n .accountId(\"my-custom-sa\")\n .displayName(\"Custom SA for VM Instance\")\n .build());\n\n var confidentialInstance = new Instance(\"confidentialInstance\", InstanceArgs.builder()\n .networkInterfaces(InstanceNetworkInterfaceArgs.builder()\n .accessConfigs()\n .network(\"default\")\n .build())\n .name(\"my-confidential-instance\")\n .zone(\"us-central1-a\")\n .machineType(\"n2d-standard-2\")\n .minCpuPlatform(\"AMD Milan\")\n .confidentialInstanceConfig(InstanceConfidentialInstanceConfigArgs.builder()\n .enableConfidentialCompute(true)\n .confidentialInstanceType(\"SEV\")\n .build())\n .bootDisk(InstanceBootDiskArgs.builder()\n .initializeParams(InstanceBootDiskInitializeParamsArgs.builder()\n .image(\"ubuntu-os-cloud/ubuntu-2004-lts\")\n .labels(Map.of(\"my_label\", \"value\"))\n .build())\n .build())\n .scratchDisks(InstanceScratchDiskArgs.builder()\n .interface_(\"NVME\")\n .build())\n .serviceAccount(InstanceServiceAccountArgs.builder()\n .email(default_.email())\n .scopes(\"cloud-platform\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:serviceaccount:Account\n properties:\n accountId: my-custom-sa\n displayName: Custom SA for VM Instance\n confidentialInstance:\n type: gcp:compute:Instance\n name: confidential_instance\n properties:\n networkInterfaces:\n - accessConfigs:\n - {}\n network: default\n name: 
my-confidential-instance\n zone: us-central1-a\n machineType: n2d-standard-2\n minCpuPlatform: AMD Milan\n confidentialInstanceConfig:\n enableConfidentialCompute: true\n confidentialInstanceType: SEV\n bootDisk:\n initializeParams:\n image: ubuntu-os-cloud/ubuntu-2004-lts\n labels:\n my_label: value\n scratchDisks:\n - interface: NVME\n serviceAccount:\n email: ${default.email}\n scopes:\n - cloud-platform\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nInstances can be imported using any of these accepted formats:\n\n* `projects/{{project}}/zones/{{zone}}/instances/{{name}}`\n\n* `{{project}}/{{zone}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, instances can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:compute/instance:Instance default projects/{{project}}/zones/{{zone}}/instances/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/instance:Instance default {{project}}/{{zone}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/instance:Instance default {{name}}\n```\n\n", "properties": { "advancedMachineFeatures": { "$ref": "#/types/gcp:compute/InstanceAdvancedMachineFeatures:InstanceAdvancedMachineFeatures", @@ -149255,7 +150800,7 @@ }, "currentStatus": { "type": "string", - "description": "The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`,\n" + "description": "The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).\n" }, "deletionProtection": { "type": "boolean", @@ -149644,7 +151189,7 @@ }, "currentStatus": { "type": "string", - "description": "The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`,\n" + "description": "The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).\n" }, "deletionProtection": { "type": "boolean", @@ -152203,7 +153748,7 @@ } }, "gcp:compute/instanceTemplate:InstanceTemplate": { - "description": "\u003e **Note**: Global instance templates can be used in any region. To lower the impact of outages outside your region and gain data residency within your region, use google_compute_region_instance_template.\n\nManages a VM instance template resource within GCE. 
For more information see\n[the official documentation](https://cloud.google.com/compute/docs/instance-templates)\nand\n[API](https://cloud.google.com/compute/docs/reference/latest/instanceTemplates).\n\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.serviceaccount.Account(\"default\", {\n accountId: \"service-account-id\",\n displayName: \"Service Account\",\n});\nconst myImage = gcp.compute.getImage({\n family: \"debian-11\",\n project: \"debian-cloud\",\n});\nconst foobar = new gcp.compute.Disk(\"foobar\", {\n name: \"existing-disk\",\n image: myImage.then(myImage =\u003e myImage.selfLink),\n size: 10,\n type: \"pd-ssd\",\n zone: \"us-central1-a\",\n});\nconst dailyBackup = new gcp.compute.ResourcePolicy(\"daily_backup\", {\n name: \"every-day-4am\",\n region: \"us-central1\",\n snapshotSchedulePolicy: {\n schedule: {\n dailySchedule: {\n daysInCycle: 1,\n startTime: \"04:00\",\n },\n },\n },\n});\nconst defaultInstanceTemplate = new gcp.compute.InstanceTemplate(\"default\", {\n name: \"appserver-template\",\n description: \"This template is used to create app server instances.\",\n tags: [\n \"foo\",\n \"bar\",\n ],\n labels: {\n environment: \"dev\",\n },\n instanceDescription: \"description assigned to instances\",\n machineType: \"e2-medium\",\n canIpForward: false,\n scheduling: {\n automaticRestart: true,\n onHostMaintenance: \"MIGRATE\",\n },\n disks: [\n {\n sourceImage: \"debian-cloud/debian-11\",\n autoDelete: true,\n boot: true,\n resourcePolicies: dailyBackup.id,\n },\n {\n source: foobar.name,\n autoDelete: false,\n boot: false,\n },\n ],\n networkInterfaces: [{\n network: \"default\",\n }],\n metadata: {\n foo: \"bar\",\n },\n serviceAccount: {\n email: _default.email,\n scopes: [\"cloud-platform\"],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.serviceaccount.Account(\"default\",\n account_id=\"service-account-id\",\n display_name=\"Service Account\")\nmy_image = gcp.compute.get_image(family=\"debian-11\",\n project=\"debian-cloud\")\nfoobar = gcp.compute.Disk(\"foobar\",\n name=\"existing-disk\",\n image=my_image.self_link,\n size=10,\n type=\"pd-ssd\",\n zone=\"us-central1-a\")\ndaily_backup = gcp.compute.ResourcePolicy(\"daily_backup\",\n name=\"every-day-4am\",\n region=\"us-central1\",\n snapshot_schedule_policy={\n \"schedule\": {\n \"daily_schedule\": {\n \"days_in_cycle\": 1,\n \"start_time\": \"04:00\",\n },\n },\n })\ndefault_instance_template = gcp.compute.InstanceTemplate(\"default\",\n name=\"appserver-template\",\n description=\"This template is used to create app server instances.\",\n tags=[\n \"foo\",\n \"bar\",\n ],\n labels={\n \"environment\": \"dev\",\n },\n instance_description=\"description assigned to instances\",\n machine_type=\"e2-medium\",\n can_ip_forward=False,\n scheduling={\n \"automatic_restart\": True,\n \"on_host_maintenance\": \"MIGRATE\",\n },\n disks=[\n {\n \"source_image\": \"debian-cloud/debian-11\",\n \"auto_delete\": True,\n \"boot\": True,\n \"resource_policies\": daily_backup.id,\n },\n {\n \"source\": foobar.name,\n \"auto_delete\": False,\n \"boot\": False,\n },\n ],\n network_interfaces=[{\n \"network\": \"default\",\n }],\n metadata={\n \"foo\": \"bar\",\n },\n service_account={\n \"email\": default.email,\n \"scopes\": [\"cloud-platform\"],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = 
Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.ServiceAccount.Account(\"default\", new()\n {\n AccountId = \"service-account-id\",\n DisplayName = \"Service Account\",\n });\n\n var myImage = Gcp.Compute.GetImage.Invoke(new()\n {\n Family = \"debian-11\",\n Project = \"debian-cloud\",\n });\n\n var foobar = new Gcp.Compute.Disk(\"foobar\", new()\n {\n Name = \"existing-disk\",\n Image = myImage.Apply(getImageResult =\u003e getImageResult.SelfLink),\n Size = 10,\n Type = \"pd-ssd\",\n Zone = \"us-central1-a\",\n });\n\n var dailyBackup = new Gcp.Compute.ResourcePolicy(\"daily_backup\", new()\n {\n Name = \"every-day-4am\",\n Region = \"us-central1\",\n SnapshotSchedulePolicy = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyArgs\n {\n Schedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs\n {\n DailySchedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs\n {\n DaysInCycle = 1,\n StartTime = \"04:00\",\n },\n },\n },\n });\n\n var defaultInstanceTemplate = new Gcp.Compute.InstanceTemplate(\"default\", new()\n {\n Name = \"appserver-template\",\n Description = \"This template is used to create app server instances.\",\n Tags = new[]\n {\n \"foo\",\n \"bar\",\n },\n Labels = \n {\n { \"environment\", \"dev\" },\n },\n InstanceDescription = \"description assigned to instances\",\n MachineType = \"e2-medium\",\n CanIpForward = false,\n Scheduling = new Gcp.Compute.Inputs.InstanceTemplateSchedulingArgs\n {\n AutomaticRestart = true,\n OnHostMaintenance = \"MIGRATE\",\n },\n Disks = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n SourceImage = \"debian-cloud/debian-11\",\n AutoDelete = true,\n Boot = true,\n ResourcePolicies = dailyBackup.Id,\n },\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n Source = foobar.Name,\n AutoDelete = false,\n Boot = false,\n },\n },\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs\n {\n Network = \"default\",\n },\n },\n Metadata = \n {\n { \"foo\", \"bar\" },\n },\n ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs\n {\n Email = @default.Email,\n Scopes = new[]\n {\n \"cloud-platform\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := serviceaccount.NewAccount(ctx, \"default\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"service-account-id\"),\n\t\t\tDisplayName: pulumi.String(\"Service Account\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmyImage, err := compute.LookupImage(ctx, \u0026compute.LookupImageArgs{\n\t\t\tFamily: pulumi.StringRef(\"debian-11\"),\n\t\t\tProject: pulumi.StringRef(\"debian-cloud\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfoobar, err := compute.NewDisk(ctx, \"foobar\", \u0026compute.DiskArgs{\n\t\t\tName: pulumi.String(\"existing-disk\"),\n\t\t\tImage: pulumi.String(myImage.SelfLink),\n\t\t\tSize: pulumi.Int(10),\n\t\t\tType: pulumi.String(\"pd-ssd\"),\n\t\t\tZone: pulumi.String(\"us-central1-a\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdailyBackup, err := compute.NewResourcePolicy(ctx, \"daily_backup\", \u0026compute.ResourcePolicyArgs{\n\t\t\tName: 
pulumi.String(\"every-day-4am\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSnapshotSchedulePolicy: \u0026compute.ResourcePolicySnapshotSchedulePolicyArgs{\n\t\t\t\tSchedule: \u0026compute.ResourcePolicySnapshotSchedulePolicyScheduleArgs{\n\t\t\t\t\tDailySchedule: \u0026compute.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs{\n\t\t\t\t\t\tDaysInCycle: pulumi.Int(1),\n\t\t\t\t\t\tStartTime: pulumi.String(\"04:00\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstanceTemplate(ctx, \"default\", \u0026compute.InstanceTemplateArgs{\n\t\t\tName: pulumi.String(\"appserver-template\"),\n\t\t\tDescription: pulumi.String(\"This template is used to create app server instances.\"),\n\t\t\tTags: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"foo\"),\n\t\t\t\tpulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"environment\": pulumi.String(\"dev\"),\n\t\t\t},\n\t\t\tInstanceDescription: pulumi.String(\"description assigned to instances\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tCanIpForward: pulumi.Bool(false),\n\t\t\tScheduling: \u0026compute.InstanceTemplateSchedulingArgs{\n\t\t\t\tAutomaticRestart: pulumi.Bool(true),\n\t\t\t\tOnHostMaintenance: pulumi.String(\"MIGRATE\"),\n\t\t\t},\n\t\t\tDisks: compute.InstanceTemplateDiskArray{\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSourceImage: pulumi.String(\"debian-cloud/debian-11\"),\n\t\t\t\t\tAutoDelete: pulumi.Bool(true),\n\t\t\t\t\tBoot: pulumi.Bool(true),\n\t\t\t\t\tResourcePolicies: dailyBackup.ID(),\n\t\t\t\t},\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSource: foobar.Name,\n\t\t\t\t\tAutoDelete: pulumi.Bool(false),\n\t\t\t\t\tBoot: pulumi.Bool(false),\n\t\t\t\t},\n\t\t\t},\n\t\t\tNetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceTemplateNetworkInterfaceArgs{\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tMetadata: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tServiceAccount: \u0026compute.InstanceTemplateServiceAccountArgs{\n\t\t\t\tEmail: _default.Email,\n\t\t\t\tScopes: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"cloud-platform\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetImageArgs;\nimport com.pulumi.gcp.compute.Disk;\nimport com.pulumi.gcp.compute.DiskArgs;\nimport com.pulumi.gcp.compute.ResourcePolicy;\nimport com.pulumi.gcp.compute.ResourcePolicyArgs;\nimport com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyArgs;\nimport com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs;\nimport com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs;\nimport com.pulumi.gcp.compute.InstanceTemplate;\nimport com.pulumi.gcp.compute.InstanceTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateSchedulingArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateNetworkInterfaceArgs;\nimport 
com.pulumi.gcp.compute.inputs.InstanceTemplateServiceAccountArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Account(\"default\", AccountArgs.builder()\n .accountId(\"service-account-id\")\n .displayName(\"Service Account\")\n .build());\n\n final var myImage = ComputeFunctions.getImage(GetImageArgs.builder()\n .family(\"debian-11\")\n .project(\"debian-cloud\")\n .build());\n\n var foobar = new Disk(\"foobar\", DiskArgs.builder()\n .name(\"existing-disk\")\n .image(myImage.applyValue(getImageResult -\u003e getImageResult.selfLink()))\n .size(10)\n .type(\"pd-ssd\")\n .zone(\"us-central1-a\")\n .build());\n\n var dailyBackup = new ResourcePolicy(\"dailyBackup\", ResourcePolicyArgs.builder()\n .name(\"every-day-4am\")\n .region(\"us-central1\")\n .snapshotSchedulePolicy(ResourcePolicySnapshotSchedulePolicyArgs.builder()\n .schedule(ResourcePolicySnapshotSchedulePolicyScheduleArgs.builder()\n .dailySchedule(ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs.builder()\n .daysInCycle(1)\n .startTime(\"04:00\")\n .build())\n .build())\n .build())\n .build());\n\n var defaultInstanceTemplate = new InstanceTemplate(\"defaultInstanceTemplate\", InstanceTemplateArgs.builder()\n .name(\"appserver-template\")\n .description(\"This template is used to create app server instances.\")\n .tags( \n \"foo\",\n \"bar\")\n .labels(Map.of(\"environment\", \"dev\"))\n .instanceDescription(\"description assigned to instances\")\n .machineType(\"e2-medium\")\n .canIpForward(false)\n .scheduling(InstanceTemplateSchedulingArgs.builder()\n .automaticRestart(true)\n .onHostMaintenance(\"MIGRATE\")\n .build())\n .disks( \n InstanceTemplateDiskArgs.builder()\n .sourceImage(\"debian-cloud/debian-11\")\n .autoDelete(true)\n .boot(true)\n .resourcePolicies(dailyBackup.id())\n .build(),\n InstanceTemplateDiskArgs.builder()\n .source(foobar.name())\n .autoDelete(false)\n .boot(false)\n .build())\n .networkInterfaces(InstanceTemplateNetworkInterfaceArgs.builder()\n .network(\"default\")\n .build())\n .metadata(Map.of(\"foo\", \"bar\"))\n .serviceAccount(InstanceTemplateServiceAccountArgs.builder()\n .email(default_.email())\n .scopes(\"cloud-platform\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:serviceaccount:Account\n properties:\n accountId: service-account-id\n displayName: Service Account\n defaultInstanceTemplate:\n type: gcp:compute:InstanceTemplate\n name: default\n properties:\n name: appserver-template\n description: This template is used to create app server instances.\n tags:\n - foo\n - bar\n labels:\n environment: dev\n instanceDescription: description assigned to instances\n machineType: e2-medium\n canIpForward: false\n scheduling:\n automaticRestart: true\n onHostMaintenance: MIGRATE\n disks:\n - sourceImage: debian-cloud/debian-11\n autoDelete: true\n boot: true\n resourcePolicies: ${dailyBackup.id}\n - source: ${foobar.name}\n autoDelete: false\n boot: false\n networkInterfaces:\n - network: default\n metadata:\n foo: bar\n serviceAccount:\n email: ${default.email}\n scopes:\n - cloud-platform\n foobar:\n type: gcp:compute:Disk\n properties:\n name: existing-disk\n image: ${myImage.selfLink}\n size: 10\n type: pd-ssd\n zone: us-central1-a\n dailyBackup:\n type: 
gcp:compute:ResourcePolicy\n name: daily_backup\n properties:\n name: every-day-4am\n region: us-central1\n snapshotSchedulePolicy:\n schedule:\n dailySchedule:\n daysInCycle: 1\n startTime: 04:00\nvariables:\n myImage:\n fn::invoke:\n Function: gcp:compute:getImage\n Arguments:\n family: debian-11\n project: debian-cloud\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Automatic Envoy Deployment\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst default = gcp.compute.getDefaultServiceAccount({});\nconst myImage = gcp.compute.getImage({\n family: \"debian-11\",\n project: \"debian-cloud\",\n});\nconst foobar = new gcp.compute.InstanceTemplate(\"foobar\", {\n name: \"appserver-template\",\n machineType: \"e2-medium\",\n canIpForward: false,\n tags: [\n \"foo\",\n \"bar\",\n ],\n disks: [{\n sourceImage: myImage.then(myImage =\u003e myImage.selfLink),\n autoDelete: true,\n boot: true,\n }],\n networkInterfaces: [{\n network: \"default\",\n }],\n scheduling: {\n preemptible: false,\n automaticRestart: true,\n },\n metadata: {\n \"gce-software-declaration\": `{\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n \"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! /bin/bash\\\\nZONE=(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\\\nexport SERVICE_PROXY_AGENT_DIRECTORY=(mktemp -d)\\\\nsudo gsutil cp gs://gce-service-proxy-\"ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"SERVICE_PROXY_AGENT_DIRECTORY\"\\\\nsudo tar -xzf \"SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"SERVICE_PROXY_AGENT_DIRECTORY\"\\\\n\"SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n}\n`,\n \"gce-service-proxy\": `{\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n}\n`,\n \"enable-guest-attributes\": \"true\",\n \"enable-osconfig\": \"true\",\n },\n serviceAccount: {\n email: _default.then(_default =\u003e _default.email),\n scopes: [\"cloud-platform\"],\n },\n labels: {\n \"gce-service-proxy\": \"on\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.compute.get_default_service_account()\nmy_image = gcp.compute.get_image(family=\"debian-11\",\n project=\"debian-cloud\")\nfoobar = gcp.compute.InstanceTemplate(\"foobar\",\n name=\"appserver-template\",\n machine_type=\"e2-medium\",\n can_ip_forward=False,\n tags=[\n \"foo\",\n \"bar\",\n ],\n disks=[{\n \"source_image\": my_image.self_link,\n \"auto_delete\": True,\n \"boot\": True,\n }],\n network_interfaces=[{\n \"network\": \"default\",\n }],\n scheduling={\n \"preemptible\": False,\n \"automatic_restart\": True,\n },\n metadata={\n \"gce-software-declaration\": \"\"\"{\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n \"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! 
/bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"$ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\nsudo tar -xzf \"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\n\"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n}\n\"\"\",\n \"gce-service-proxy\": \"\"\"{\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n}\n\"\"\",\n \"enable-guest-attributes\": \"true\",\n \"enable-osconfig\": \"true\",\n },\n service_account={\n \"email\": default.email,\n \"scopes\": [\"cloud-platform\"],\n },\n labels={\n \"gce-service-proxy\": \"on\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = Gcp.Compute.GetDefaultServiceAccount.Invoke();\n\n var myImage = Gcp.Compute.GetImage.Invoke(new()\n {\n Family = \"debian-11\",\n Project = \"debian-cloud\",\n });\n\n var foobar = new Gcp.Compute.InstanceTemplate(\"foobar\", new()\n {\n Name = \"appserver-template\",\n MachineType = \"e2-medium\",\n CanIpForward = false,\n Tags = new[]\n {\n \"foo\",\n \"bar\",\n },\n Disks = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n SourceImage = myImage.Apply(getImageResult =\u003e getImageResult.SelfLink),\n AutoDelete = true,\n Boot = true,\n },\n },\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs\n {\n Network = \"default\",\n },\n },\n Scheduling = new Gcp.Compute.Inputs.InstanceTemplateSchedulingArgs\n {\n Preemptible = false,\n AutomaticRestart = true,\n },\n Metadata = \n {\n { \"gce-software-declaration\", @\"{\n \"\"softwareRecipes\"\": [{\n \"\"name\"\": \"\"install-gce-service-proxy-agent\"\",\n \"\"desired_state\"\": \"\"INSTALLED\"\",\n \"\"installSteps\"\": [{\n \"\"scriptRun\"\": {\n \"\"script\"\": \"\"#! 
/bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"\"$ZONE\"\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\"\\nsudo tar -xzf \"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\"/service-proxy-agent-0.2.tgz -C \"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\"\\n\"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\"\n }\n }]\n }]\n}\n\" },\n { \"gce-service-proxy\", @\"{\n \"\"api-version\"\": \"\"0.2\"\",\n \"\"proxy-spec\"\": {\n \"\"proxy-port\"\": 15001,\n \"\"network\"\": \"\"my-network\"\",\n \"\"tracing\"\": \"\"ON\"\",\n \"\"access-log\"\": \"\"/var/log/envoy/access.log\"\"\n }\n \"\"service\"\": {\n \"\"serving-ports\"\": [80, 81]\n },\n \"\"labels\"\": {\n \"\"app_name\"\": \"\"bookserver_app\"\",\n \"\"app_version\"\": \"\"STABLE\"\"\n }\n}\n\" },\n { \"enable-guest-attributes\", \"true\" },\n { \"enable-osconfig\", \"true\" },\n },\n ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs\n {\n Email = @default.Apply(@default =\u003e @default.Apply(getDefaultServiceAccountResult =\u003e getDefaultServiceAccountResult.Email)),\n Scopes = new[]\n {\n \"cloud-platform\",\n },\n },\n Labels = \n {\n { \"gce-service-proxy\", \"on\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_default, err := compute.GetDefaultServiceAccount(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmyImage, err := compute.LookupImage(ctx, \u0026compute.LookupImageArgs{\n\t\t\tFamily: pulumi.StringRef(\"debian-11\"),\n\t\t\tProject: pulumi.StringRef(\"debian-cloud\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstanceTemplate(ctx, \"foobar\", \u0026compute.InstanceTemplateArgs{\n\t\t\tName: pulumi.String(\"appserver-template\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tCanIpForward: pulumi.Bool(false),\n\t\t\tTags: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"foo\"),\n\t\t\t\tpulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDisks: compute.InstanceTemplateDiskArray{\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSourceImage: pulumi.String(myImage.SelfLink),\n\t\t\t\t\tAutoDelete: pulumi.Bool(true),\n\t\t\t\t\tBoot: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t},\n\t\t\tNetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceTemplateNetworkInterfaceArgs{\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tScheduling: \u0026compute.InstanceTemplateSchedulingArgs{\n\t\t\t\tPreemptible: pulumi.Bool(false),\n\t\t\t\tAutomaticRestart: pulumi.Bool(true),\n\t\t\t},\n\t\t\tMetadata: pulumi.StringMap{\n\t\t\t\t\"gce-software-declaration\": pulumi.String(`{\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n \"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! 
/bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"$ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\nsudo tar -xzf \"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\n\"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n}\n`),\n\t\t\t\t\"gce-service-proxy\": pulumi.String(`{\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n}\n`),\n\t\t\t\t\"enable-guest-attributes\": pulumi.String(\"true\"),\n\t\t\t\t\"enable-osconfig\": pulumi.String(\"true\"),\n\t\t\t},\n\t\t\tServiceAccount: \u0026compute.InstanceTemplateServiceAccountArgs{\n\t\t\t\tEmail: pulumi.String(_default.Email),\n\t\t\t\tScopes: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"cloud-platform\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"gce-service-proxy\": pulumi.String(\"on\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetDefaultServiceAccountArgs;\nimport com.pulumi.gcp.compute.inputs.GetImageArgs;\nimport com.pulumi.gcp.compute.InstanceTemplate;\nimport com.pulumi.gcp.compute.InstanceTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateNetworkInterfaceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateSchedulingArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateServiceAccountArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var default = ComputeFunctions.getDefaultServiceAccount();\n\n final var myImage = ComputeFunctions.getImage(GetImageArgs.builder()\n .family(\"debian-11\")\n .project(\"debian-cloud\")\n .build());\n\n var foobar = new InstanceTemplate(\"foobar\", InstanceTemplateArgs.builder()\n .name(\"appserver-template\")\n .machineType(\"e2-medium\")\n .canIpForward(false)\n .tags( \n \"foo\",\n \"bar\")\n .disks(InstanceTemplateDiskArgs.builder()\n .sourceImage(myImage.applyValue(getImageResult -\u003e getImageResult.selfLink()))\n .autoDelete(true)\n .boot(true)\n .build())\n .networkInterfaces(InstanceTemplateNetworkInterfaceArgs.builder()\n .network(\"default\")\n .build())\n .scheduling(InstanceTemplateSchedulingArgs.builder()\n .preemptible(false)\n .automaticRestart(true)\n .build())\n .metadata(Map.ofEntries(\n Map.entry(\"gce-software-declaration\", \"\"\"\n{\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n 
\"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! /bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"$ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\nsudo tar -xzf \"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\n\"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n}\n \"\"\"),\n Map.entry(\"gce-service-proxy\", \"\"\"\n{\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n}\n \"\"\"),\n Map.entry(\"enable-guest-attributes\", \"true\"),\n Map.entry(\"enable-osconfig\", \"true\")\n ))\n .serviceAccount(InstanceTemplateServiceAccountArgs.builder()\n .email(default_.email())\n .scopes(\"cloud-platform\")\n .build())\n .labels(Map.of(\"gce-service-proxy\", \"on\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n foobar:\n type: gcp:compute:InstanceTemplate\n properties:\n name: appserver-template\n machineType: e2-medium\n canIpForward: false\n tags:\n - foo\n - bar\n disks:\n - sourceImage: ${myImage.selfLink}\n autoDelete: true\n boot: true\n networkInterfaces:\n - network: default\n scheduling:\n preemptible: false\n automaticRestart: true\n metadata:\n gce-software-declaration: |\n {\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n \"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! 
/bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"$ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\nsudo tar -xzf \"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\n\"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n }\n gce-service-proxy: |\n {\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n }\n enable-guest-attributes: 'true'\n enable-osconfig: 'true'\n serviceAccount:\n email: ${default.email}\n scopes:\n - cloud-platform\n labels:\n gce-service-proxy: on\nvariables:\n default:\n fn::invoke:\n Function: gcp:compute:getDefaultServiceAccount\n Arguments: {}\n myImage:\n fn::invoke:\n Function: gcp:compute:getImage\n Arguments:\n family: debian-11\n project: debian-cloud\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Deploying the Latest Image\n\nA common way to use instance templates and managed instance groups is to deploy the\nlatest image in a family, usually the latest build of your application. There are two\nways to do this in the provider, and they have their pros and cons. The difference ends\nup being in how \"latest\" is interpreted. 
You can either deploy the latest image available\nwhen the provider runs, or you can have each instance check what the latest image is when\nit's being created, either as part of a scaling event or when it is rebuilt by the instance\ngroup manager.\n\nIf you're not sure, we recommend deploying the latest image available when the provider runs,\nbecause this means all the instances in your group will always be based on the same image,\nand that no upgrades or changes to your instances happen outside of a `pulumi up`.\nYou can achieve this by using the `gcp.compute.getImage`\ndata source, which will retrieve the latest image on every `pulumi up` and update\nthe template to use that specific image:\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myImage = gcp.compute.getImage({\n family: \"debian-11\",\n project: \"debian-cloud\",\n});\nconst instanceTemplate = new gcp.compute.InstanceTemplate(\"instance_template\", {\n namePrefix: \"instance-template-\",\n machineType: \"e2-medium\",\n region: \"us-central1\",\n disks: [{\n sourceImage: myImage.then(myImage =\u003e myImage.selfLink),\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_image = gcp.compute.get_image(family=\"debian-11\",\n project=\"debian-cloud\")\ninstance_template = gcp.compute.InstanceTemplate(\"instance_template\",\n name_prefix=\"instance-template-\",\n machine_type=\"e2-medium\",\n region=\"us-central1\",\n disks=[{\n \"source_image\": my_image.self_link,\n }])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myImage = Gcp.Compute.GetImage.Invoke(new()\n {\n Family = \"debian-11\",\n Project = \"debian-cloud\",\n });\n\n var instanceTemplate = new Gcp.Compute.InstanceTemplate(\"instance_template\", new()\n {\n NamePrefix = \"instance-template-\",\n MachineType = \"e2-medium\",\n Region = \"us-central1\",\n Disks = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n SourceImage = myImage.Apply(getImageResult =\u003e getImageResult.SelfLink),\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tmyImage, err := compute.LookupImage(ctx, \u0026compute.LookupImageArgs{\n\t\t\tFamily: pulumi.StringRef(\"debian-11\"),\n\t\t\tProject: pulumi.StringRef(\"debian-cloud\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstanceTemplate(ctx, \"instance_template\", \u0026compute.InstanceTemplateArgs{\n\t\t\tNamePrefix: pulumi.String(\"instance-template-\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tDisks: compute.InstanceTemplateDiskArray{\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSourceImage: pulumi.String(myImage.SelfLink),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetImageArgs;\nimport com.pulumi.gcp.compute.InstanceTemplate;\nimport com.pulumi.gcp.compute.InstanceTemplateArgs;\nimport 
com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var myImage = ComputeFunctions.getImage(GetImageArgs.builder()\n .family(\"debian-11\")\n .project(\"debian-cloud\")\n .build());\n\n var instanceTemplate = new InstanceTemplate(\"instanceTemplate\", InstanceTemplateArgs.builder()\n .namePrefix(\"instance-template-\")\n .machineType(\"e2-medium\")\n .region(\"us-central1\")\n .disks(InstanceTemplateDiskArgs.builder()\n .sourceImage(myImage.applyValue(getImageResult -\u003e getImageResult.selfLink()))\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instanceTemplate:\n type: gcp:compute:InstanceTemplate\n name: instance_template\n properties:\n namePrefix: instance-template-\n machineType: e2-medium\n region: us-central1\n disks:\n - sourceImage: ${myImage.selfLink}\nvariables:\n myImage:\n fn::invoke:\n Function: gcp:compute:getImage\n Arguments:\n family: debian-11\n project: debian-cloud\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nTo have instances update to the latest on every scaling event or instance re-creation,\nuse the family as the image for the disk, and it will use GCP's default behavior, setting\nthe image for the template to the family:\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst instanceTemplate = new gcp.compute.InstanceTemplate(\"instance_template\", {\n namePrefix: \"instance-template-\",\n machineType: \"e2-medium\",\n region: \"us-central1\",\n disks: [{\n sourceImage: \"debian-cloud/debian-11\",\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ninstance_template = gcp.compute.InstanceTemplate(\"instance_template\",\n name_prefix=\"instance-template-\",\n machine_type=\"e2-medium\",\n region=\"us-central1\",\n disks=[{\n \"source_image\": \"debian-cloud/debian-11\",\n }])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var instanceTemplate = new Gcp.Compute.InstanceTemplate(\"instance_template\", new()\n {\n NamePrefix = \"instance-template-\",\n MachineType = \"e2-medium\",\n Region = \"us-central1\",\n Disks = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n SourceImage = \"debian-cloud/debian-11\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewInstanceTemplate(ctx, \"instance_template\", \u0026compute.InstanceTemplateArgs{\n\t\t\tNamePrefix: pulumi.String(\"instance-template-\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tDisks: compute.InstanceTemplateDiskArray{\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSourceImage: pulumi.String(\"debian-cloud/debian-11\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport 
com.pulumi.gcp.compute.InstanceTemplate;\nimport com.pulumi.gcp.compute.InstanceTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var instanceTemplate = new InstanceTemplate(\"instanceTemplate\", InstanceTemplateArgs.builder()\n .namePrefix(\"instance-template-\")\n .machineType(\"e2-medium\")\n .region(\"us-central1\")\n .disks(InstanceTemplateDiskArgs.builder()\n .sourceImage(\"debian-cloud/debian-11\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instanceTemplate:\n type: gcp:compute:InstanceTemplate\n name: instance_template\n properties:\n namePrefix: instance-template-\n machineType: e2-medium\n region: us-central1\n disks:\n - sourceImage: debian-cloud/debian-11\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nInstance templates can be imported using any of these accepted formats:\n\n* `projects/{{project}}/global/instanceTemplates/{{name}}`\n\n* `{{project}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, instance templates can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:compute/instanceTemplate:InstanceTemplate default projects/{{project}}/global/instanceTemplates/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/instanceTemplate:InstanceTemplate default {{project}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/instanceTemplate:InstanceTemplate default {{name}}\n```\n\n", + "description": "\u003e **Note**: Global instance templates can be used in any region. To lower the impact of outages outside your region and gain data residency within your region, use google_compute_region_instance_template.\n\nManages a VM instance template resource within GCE. 
For more information see\n[the official documentation](https://cloud.google.com/compute/docs/instance-templates)\nand\n[API](https://cloud.google.com/compute/docs/reference/latest/instanceTemplates).\n\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.serviceaccount.Account(\"default\", {\n accountId: \"service-account-id\",\n displayName: \"Service Account\",\n});\nconst myImage = gcp.compute.getImage({\n family: \"debian-11\",\n project: \"debian-cloud\",\n});\nconst foobar = new gcp.compute.Disk(\"foobar\", {\n name: \"existing-disk\",\n image: myImage.then(myImage =\u003e myImage.selfLink),\n size: 10,\n type: \"pd-ssd\",\n zone: \"us-central1-a\",\n});\nconst dailyBackup = new gcp.compute.ResourcePolicy(\"daily_backup\", {\n name: \"every-day-4am\",\n region: \"us-central1\",\n snapshotSchedulePolicy: {\n schedule: {\n dailySchedule: {\n daysInCycle: 1,\n startTime: \"04:00\",\n },\n },\n },\n});\nconst defaultInstanceTemplate = new gcp.compute.InstanceTemplate(\"default\", {\n name: \"appserver-template\",\n description: \"This template is used to create app server instances.\",\n tags: [\n \"foo\",\n \"bar\",\n ],\n labels: {\n environment: \"dev\",\n },\n instanceDescription: \"description assigned to instances\",\n machineType: \"e2-medium\",\n canIpForward: false,\n scheduling: {\n automaticRestart: true,\n onHostMaintenance: \"MIGRATE\",\n },\n disks: [\n {\n sourceImage: \"debian-cloud/debian-11\",\n autoDelete: true,\n boot: true,\n resourcePolicies: dailyBackup.id,\n },\n {\n source: foobar.name,\n autoDelete: false,\n boot: false,\n },\n ],\n networkInterfaces: [{\n network: \"default\",\n }],\n metadata: {\n foo: \"bar\",\n },\n serviceAccount: {\n email: _default.email,\n scopes: [\"cloud-platform\"],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.serviceaccount.Account(\"default\",\n account_id=\"service-account-id\",\n display_name=\"Service Account\")\nmy_image = gcp.compute.get_image(family=\"debian-11\",\n project=\"debian-cloud\")\nfoobar = gcp.compute.Disk(\"foobar\",\n name=\"existing-disk\",\n image=my_image.self_link,\n size=10,\n type=\"pd-ssd\",\n zone=\"us-central1-a\")\ndaily_backup = gcp.compute.ResourcePolicy(\"daily_backup\",\n name=\"every-day-4am\",\n region=\"us-central1\",\n snapshot_schedule_policy={\n \"schedule\": {\n \"daily_schedule\": {\n \"days_in_cycle\": 1,\n \"start_time\": \"04:00\",\n },\n },\n })\ndefault_instance_template = gcp.compute.InstanceTemplate(\"default\",\n name=\"appserver-template\",\n description=\"This template is used to create app server instances.\",\n tags=[\n \"foo\",\n \"bar\",\n ],\n labels={\n \"environment\": \"dev\",\n },\n instance_description=\"description assigned to instances\",\n machine_type=\"e2-medium\",\n can_ip_forward=False,\n scheduling={\n \"automatic_restart\": True,\n \"on_host_maintenance\": \"MIGRATE\",\n },\n disks=[\n {\n \"source_image\": \"debian-cloud/debian-11\",\n \"auto_delete\": True,\n \"boot\": True,\n \"resource_policies\": daily_backup.id,\n },\n {\n \"source\": foobar.name,\n \"auto_delete\": False,\n \"boot\": False,\n },\n ],\n network_interfaces=[{\n \"network\": \"default\",\n }],\n metadata={\n \"foo\": \"bar\",\n },\n service_account={\n \"email\": default.email,\n \"scopes\": [\"cloud-platform\"],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = 
Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.ServiceAccount.Account(\"default\", new()\n {\n AccountId = \"service-account-id\",\n DisplayName = \"Service Account\",\n });\n\n var myImage = Gcp.Compute.GetImage.Invoke(new()\n {\n Family = \"debian-11\",\n Project = \"debian-cloud\",\n });\n\n var foobar = new Gcp.Compute.Disk(\"foobar\", new()\n {\n Name = \"existing-disk\",\n Image = myImage.Apply(getImageResult =\u003e getImageResult.SelfLink),\n Size = 10,\n Type = \"pd-ssd\",\n Zone = \"us-central1-a\",\n });\n\n var dailyBackup = new Gcp.Compute.ResourcePolicy(\"daily_backup\", new()\n {\n Name = \"every-day-4am\",\n Region = \"us-central1\",\n SnapshotSchedulePolicy = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyArgs\n {\n Schedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs\n {\n DailySchedule = new Gcp.Compute.Inputs.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs\n {\n DaysInCycle = 1,\n StartTime = \"04:00\",\n },\n },\n },\n });\n\n var defaultInstanceTemplate = new Gcp.Compute.InstanceTemplate(\"default\", new()\n {\n Name = \"appserver-template\",\n Description = \"This template is used to create app server instances.\",\n Tags = new[]\n {\n \"foo\",\n \"bar\",\n },\n Labels = \n {\n { \"environment\", \"dev\" },\n },\n InstanceDescription = \"description assigned to instances\",\n MachineType = \"e2-medium\",\n CanIpForward = false,\n Scheduling = new Gcp.Compute.Inputs.InstanceTemplateSchedulingArgs\n {\n AutomaticRestart = true,\n OnHostMaintenance = \"MIGRATE\",\n },\n Disks = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n SourceImage = \"debian-cloud/debian-11\",\n AutoDelete = true,\n Boot = true,\n ResourcePolicies = dailyBackup.Id,\n },\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n Source = foobar.Name,\n AutoDelete = false,\n Boot = false,\n },\n },\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs\n {\n Network = \"default\",\n },\n },\n Metadata = \n {\n { \"foo\", \"bar\" },\n },\n ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs\n {\n Email = @default.Email,\n Scopes = new[]\n {\n \"cloud-platform\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := serviceaccount.NewAccount(ctx, \"default\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"service-account-id\"),\n\t\t\tDisplayName: pulumi.String(\"Service Account\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmyImage, err := compute.LookupImage(ctx, \u0026compute.LookupImageArgs{\n\t\t\tFamily: pulumi.StringRef(\"debian-11\"),\n\t\t\tProject: pulumi.StringRef(\"debian-cloud\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfoobar, err := compute.NewDisk(ctx, \"foobar\", \u0026compute.DiskArgs{\n\t\t\tName: pulumi.String(\"existing-disk\"),\n\t\t\tImage: pulumi.String(myImage.SelfLink),\n\t\t\tSize: pulumi.Int(10),\n\t\t\tType: pulumi.String(\"pd-ssd\"),\n\t\t\tZone: pulumi.String(\"us-central1-a\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdailyBackup, err := compute.NewResourcePolicy(ctx, \"daily_backup\", \u0026compute.ResourcePolicyArgs{\n\t\t\tName: 
pulumi.String(\"every-day-4am\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSnapshotSchedulePolicy: \u0026compute.ResourcePolicySnapshotSchedulePolicyArgs{\n\t\t\t\tSchedule: \u0026compute.ResourcePolicySnapshotSchedulePolicyScheduleArgs{\n\t\t\t\t\tDailySchedule: \u0026compute.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs{\n\t\t\t\t\t\tDaysInCycle: pulumi.Int(1),\n\t\t\t\t\t\tStartTime: pulumi.String(\"04:00\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstanceTemplate(ctx, \"default\", \u0026compute.InstanceTemplateArgs{\n\t\t\tName: pulumi.String(\"appserver-template\"),\n\t\t\tDescription: pulumi.String(\"This template is used to create app server instances.\"),\n\t\t\tTags: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"foo\"),\n\t\t\t\tpulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"environment\": pulumi.String(\"dev\"),\n\t\t\t},\n\t\t\tInstanceDescription: pulumi.String(\"description assigned to instances\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tCanIpForward: pulumi.Bool(false),\n\t\t\tScheduling: \u0026compute.InstanceTemplateSchedulingArgs{\n\t\t\t\tAutomaticRestart: pulumi.Bool(true),\n\t\t\t\tOnHostMaintenance: pulumi.String(\"MIGRATE\"),\n\t\t\t},\n\t\t\tDisks: compute.InstanceTemplateDiskArray{\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSourceImage: pulumi.String(\"debian-cloud/debian-11\"),\n\t\t\t\t\tAutoDelete: pulumi.Bool(true),\n\t\t\t\t\tBoot: pulumi.Bool(true),\n\t\t\t\t\tResourcePolicies: dailyBackup.ID(),\n\t\t\t\t},\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSource: foobar.Name,\n\t\t\t\t\tAutoDelete: pulumi.Bool(false),\n\t\t\t\t\tBoot: pulumi.Bool(false),\n\t\t\t\t},\n\t\t\t},\n\t\t\tNetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceTemplateNetworkInterfaceArgs{\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tMetadata: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tServiceAccount: \u0026compute.InstanceTemplateServiceAccountArgs{\n\t\t\t\tEmail: _default.Email,\n\t\t\t\tScopes: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"cloud-platform\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetImageArgs;\nimport com.pulumi.gcp.compute.Disk;\nimport com.pulumi.gcp.compute.DiskArgs;\nimport com.pulumi.gcp.compute.ResourcePolicy;\nimport com.pulumi.gcp.compute.ResourcePolicyArgs;\nimport com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyArgs;\nimport com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleArgs;\nimport com.pulumi.gcp.compute.inputs.ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs;\nimport com.pulumi.gcp.compute.InstanceTemplate;\nimport com.pulumi.gcp.compute.InstanceTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateSchedulingArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateNetworkInterfaceArgs;\nimport 
com.pulumi.gcp.compute.inputs.InstanceTemplateServiceAccountArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Account(\"default\", AccountArgs.builder()\n .accountId(\"service-account-id\")\n .displayName(\"Service Account\")\n .build());\n\n final var myImage = ComputeFunctions.getImage(GetImageArgs.builder()\n .family(\"debian-11\")\n .project(\"debian-cloud\")\n .build());\n\n var foobar = new Disk(\"foobar\", DiskArgs.builder()\n .name(\"existing-disk\")\n .image(myImage.applyValue(getImageResult -\u003e getImageResult.selfLink()))\n .size(10)\n .type(\"pd-ssd\")\n .zone(\"us-central1-a\")\n .build());\n\n var dailyBackup = new ResourcePolicy(\"dailyBackup\", ResourcePolicyArgs.builder()\n .name(\"every-day-4am\")\n .region(\"us-central1\")\n .snapshotSchedulePolicy(ResourcePolicySnapshotSchedulePolicyArgs.builder()\n .schedule(ResourcePolicySnapshotSchedulePolicyScheduleArgs.builder()\n .dailySchedule(ResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleArgs.builder()\n .daysInCycle(1)\n .startTime(\"04:00\")\n .build())\n .build())\n .build())\n .build());\n\n var defaultInstanceTemplate = new InstanceTemplate(\"defaultInstanceTemplate\", InstanceTemplateArgs.builder()\n .name(\"appserver-template\")\n .description(\"This template is used to create app server instances.\")\n .tags( \n \"foo\",\n \"bar\")\n .labels(Map.of(\"environment\", \"dev\"))\n .instanceDescription(\"description assigned to instances\")\n .machineType(\"e2-medium\")\n .canIpForward(false)\n .scheduling(InstanceTemplateSchedulingArgs.builder()\n .automaticRestart(true)\n .onHostMaintenance(\"MIGRATE\")\n .build())\n .disks( \n InstanceTemplateDiskArgs.builder()\n .sourceImage(\"debian-cloud/debian-11\")\n .autoDelete(true)\n .boot(true)\n .resourcePolicies(dailyBackup.id())\n .build(),\n InstanceTemplateDiskArgs.builder()\n .source(foobar.name())\n .autoDelete(false)\n .boot(false)\n .build())\n .networkInterfaces(InstanceTemplateNetworkInterfaceArgs.builder()\n .network(\"default\")\n .build())\n .metadata(Map.of(\"foo\", \"bar\"))\n .serviceAccount(InstanceTemplateServiceAccountArgs.builder()\n .email(default_.email())\n .scopes(\"cloud-platform\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:serviceaccount:Account\n properties:\n accountId: service-account-id\n displayName: Service Account\n defaultInstanceTemplate:\n type: gcp:compute:InstanceTemplate\n name: default\n properties:\n name: appserver-template\n description: This template is used to create app server instances.\n tags:\n - foo\n - bar\n labels:\n environment: dev\n instanceDescription: description assigned to instances\n machineType: e2-medium\n canIpForward: false\n scheduling:\n automaticRestart: true\n onHostMaintenance: MIGRATE\n disks:\n - sourceImage: debian-cloud/debian-11\n autoDelete: true\n boot: true\n resourcePolicies: ${dailyBackup.id}\n - source: ${foobar.name}\n autoDelete: false\n boot: false\n networkInterfaces:\n - network: default\n metadata:\n foo: bar\n serviceAccount:\n email: ${default.email}\n scopes:\n - cloud-platform\n foobar:\n type: gcp:compute:Disk\n properties:\n name: existing-disk\n image: ${myImage.selfLink}\n size: 10\n type: pd-ssd\n zone: us-central1-a\n dailyBackup:\n type: 
gcp:compute:ResourcePolicy\n name: daily_backup\n properties:\n name: every-day-4am\n region: us-central1\n snapshotSchedulePolicy:\n schedule:\n dailySchedule:\n daysInCycle: 1\n startTime: 04:00\nvariables:\n myImage:\n fn::invoke:\n Function: gcp:compute:getImage\n Arguments:\n family: debian-11\n project: debian-cloud\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Automatic Envoy Deployment\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = gcp.compute.getDefaultServiceAccount({});\nconst myImage = gcp.compute.getImage({\n family: \"debian-11\",\n project: \"debian-cloud\",\n});\nconst foobar = new gcp.compute.InstanceTemplate(\"foobar\", {\n name: \"appserver-template\",\n machineType: \"e2-medium\",\n canIpForward: false,\n tags: [\n \"foo\",\n \"bar\",\n ],\n disks: [{\n sourceImage: myImage.then(myImage =\u003e myImage.selfLink),\n autoDelete: true,\n boot: true,\n }],\n networkInterfaces: [{\n network: \"default\",\n }],\n scheduling: {\n preemptible: false,\n automaticRestart: true,\n },\n metadata: {\n \"gce-software-declaration\": `{\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n \"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! /bin/bash\\\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\\\nsudo gsutil cp gs://gce-service-proxy-\"$ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\\\nsudo tar -xzf \"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\\\n\"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n}\n`,\n \"gce-service-proxy\": `{\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n}\n`,\n \"enable-guest-attributes\": \"true\",\n \"enable-osconfig\": \"true\",\n },\n serviceAccount: {\n email: _default.then(_default =\u003e _default.email),\n scopes: [\"cloud-platform\"],\n },\n labels: {\n \"gce-service-proxy\": \"on\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.compute.get_default_service_account()\nmy_image = gcp.compute.get_image(family=\"debian-11\",\n project=\"debian-cloud\")\nfoobar = gcp.compute.InstanceTemplate(\"foobar\",\n name=\"appserver-template\",\n machine_type=\"e2-medium\",\n can_ip_forward=False,\n tags=[\n \"foo\",\n \"bar\",\n ],\n disks=[{\n \"source_image\": my_image.self_link,\n \"auto_delete\": True,\n \"boot\": True,\n }],\n network_interfaces=[{\n \"network\": \"default\",\n }],\n scheduling={\n \"preemptible\": False,\n \"automatic_restart\": True,\n },\n metadata={\n \"gce-software-declaration\": \"\"\"{\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n \"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! 
/bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"$ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\nsudo tar -xzf \"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\n\"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n}\n\"\"\",\n \"gce-service-proxy\": \"\"\"{\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n}\n\"\"\",\n \"enable-guest-attributes\": \"true\",\n \"enable-osconfig\": \"true\",\n },\n service_account={\n \"email\": default.email,\n \"scopes\": [\"cloud-platform\"],\n },\n labels={\n \"gce-service-proxy\": \"on\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = Gcp.Compute.GetDefaultServiceAccount.Invoke();\n\n var myImage = Gcp.Compute.GetImage.Invoke(new()\n {\n Family = \"debian-11\",\n Project = \"debian-cloud\",\n });\n\n var foobar = new Gcp.Compute.InstanceTemplate(\"foobar\", new()\n {\n Name = \"appserver-template\",\n MachineType = \"e2-medium\",\n CanIpForward = false,\n Tags = new[]\n {\n \"foo\",\n \"bar\",\n },\n Disks = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n SourceImage = myImage.Apply(getImageResult =\u003e getImageResult.SelfLink),\n AutoDelete = true,\n Boot = true,\n },\n },\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs\n {\n Network = \"default\",\n },\n },\n Scheduling = new Gcp.Compute.Inputs.InstanceTemplateSchedulingArgs\n {\n Preemptible = false,\n AutomaticRestart = true,\n },\n Metadata = \n {\n { \"gce-software-declaration\", @\"{\n \"\"softwareRecipes\"\": [{\n \"\"name\"\": \"\"install-gce-service-proxy-agent\"\",\n \"\"desired_state\"\": \"\"INSTALLED\"\",\n \"\"installSteps\"\": [{\n \"\"scriptRun\"\": {\n \"\"script\"\": \"\"#! 
/bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"\"$ZONE\"\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\"\\nsudo tar -xzf \"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\"/service-proxy-agent-0.2.tgz -C \"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\"\\n\"\"$SERVICE_PROXY_AGENT_DIRECTORY\"\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\"\n }\n }]\n }]\n}\n\" },\n { \"gce-service-proxy\", @\"{\n \"\"api-version\"\": \"\"0.2\"\",\n \"\"proxy-spec\"\": {\n \"\"proxy-port\"\": 15001,\n \"\"network\"\": \"\"my-network\"\",\n \"\"tracing\"\": \"\"ON\"\",\n \"\"access-log\"\": \"\"/var/log/envoy/access.log\"\"\n }\n \"\"service\"\": {\n \"\"serving-ports\"\": [80, 81]\n },\n \"\"labels\"\": {\n \"\"app_name\"\": \"\"bookserver_app\"\",\n \"\"app_version\"\": \"\"STABLE\"\"\n }\n}\n\" },\n { \"enable-guest-attributes\", \"true\" },\n { \"enable-osconfig\", \"true\" },\n },\n ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs\n {\n Email = @default.Apply(@default =\u003e @default.Apply(getDefaultServiceAccountResult =\u003e getDefaultServiceAccountResult.Email)),\n Scopes = new[]\n {\n \"cloud-platform\",\n },\n },\n Labels = \n {\n { \"gce-service-proxy\", \"on\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_default, err := compute.GetDefaultServiceAccount(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmyImage, err := compute.LookupImage(ctx, \u0026compute.LookupImageArgs{\n\t\t\tFamily: pulumi.StringRef(\"debian-11\"),\n\t\t\tProject: pulumi.StringRef(\"debian-cloud\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstanceTemplate(ctx, \"foobar\", \u0026compute.InstanceTemplateArgs{\n\t\t\tName: pulumi.String(\"appserver-template\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tCanIpForward: pulumi.Bool(false),\n\t\t\tTags: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"foo\"),\n\t\t\t\tpulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDisks: compute.InstanceTemplateDiskArray{\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSourceImage: pulumi.String(myImage.SelfLink),\n\t\t\t\t\tAutoDelete: pulumi.Bool(true),\n\t\t\t\t\tBoot: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t},\n\t\t\tNetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceTemplateNetworkInterfaceArgs{\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tScheduling: \u0026compute.InstanceTemplateSchedulingArgs{\n\t\t\t\tPreemptible: pulumi.Bool(false),\n\t\t\t\tAutomaticRestart: pulumi.Bool(true),\n\t\t\t},\n\t\t\tMetadata: pulumi.StringMap{\n\t\t\t\t\"gce-software-declaration\": pulumi.String(`{\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n \"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! 
/bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"$ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\nsudo tar -xzf \"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\n\"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n}\n`),\n\t\t\t\t\"gce-service-proxy\": pulumi.String(`{\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n}\n`),\n\t\t\t\t\"enable-guest-attributes\": pulumi.String(\"true\"),\n\t\t\t\t\"enable-osconfig\": pulumi.String(\"true\"),\n\t\t\t},\n\t\t\tServiceAccount: \u0026compute.InstanceTemplateServiceAccountArgs{\n\t\t\t\tEmail: pulumi.String(_default.Email),\n\t\t\t\tScopes: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"cloud-platform\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"gce-service-proxy\": pulumi.String(\"on\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetDefaultServiceAccountArgs;\nimport com.pulumi.gcp.compute.inputs.GetImageArgs;\nimport com.pulumi.gcp.compute.InstanceTemplate;\nimport com.pulumi.gcp.compute.InstanceTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateNetworkInterfaceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateSchedulingArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateServiceAccountArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var default = ComputeFunctions.getDefaultServiceAccount();\n\n final var myImage = ComputeFunctions.getImage(GetImageArgs.builder()\n .family(\"debian-11\")\n .project(\"debian-cloud\")\n .build());\n\n var foobar = new InstanceTemplate(\"foobar\", InstanceTemplateArgs.builder()\n .name(\"appserver-template\")\n .machineType(\"e2-medium\")\n .canIpForward(false)\n .tags( \n \"foo\",\n \"bar\")\n .disks(InstanceTemplateDiskArgs.builder()\n .sourceImage(myImage.applyValue(getImageResult -\u003e getImageResult.selfLink()))\n .autoDelete(true)\n .boot(true)\n .build())\n .networkInterfaces(InstanceTemplateNetworkInterfaceArgs.builder()\n .network(\"default\")\n .build())\n .scheduling(InstanceTemplateSchedulingArgs.builder()\n .preemptible(false)\n .automaticRestart(true)\n .build())\n .metadata(Map.ofEntries(\n Map.entry(\"gce-software-declaration\", \"\"\"\n{\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n 
\"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! /bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"$ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\nsudo tar -xzf \"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\n\"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n}\n \"\"\"),\n Map.entry(\"gce-service-proxy\", \"\"\"\n{\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n}\n \"\"\"),\n Map.entry(\"enable-guest-attributes\", \"true\"),\n Map.entry(\"enable-osconfig\", \"true\")\n ))\n .serviceAccount(InstanceTemplateServiceAccountArgs.builder()\n .email(default_.email())\n .scopes(\"cloud-platform\")\n .build())\n .labels(Map.of(\"gce-service-proxy\", \"on\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n foobar:\n type: gcp:compute:InstanceTemplate\n properties:\n name: appserver-template\n machineType: e2-medium\n canIpForward: false\n tags:\n - foo\n - bar\n disks:\n - sourceImage: ${myImage.selfLink}\n autoDelete: true\n boot: true\n networkInterfaces:\n - network: default\n scheduling:\n preemptible: false\n automaticRestart: true\n metadata:\n gce-software-declaration: |\n {\n \"softwareRecipes\": [{\n \"name\": \"install-gce-service-proxy-agent\",\n \"desired_state\": \"INSTALLED\",\n \"installSteps\": [{\n \"scriptRun\": {\n \"script\": \"#! 
/bin/bash\\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\\nsudo gsutil cp gs://gce-service-proxy-\"$ZONE\"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\nsudo tar -xzf \"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent-0.2.tgz -C \"$SERVICE_PROXY_AGENT_DIRECTORY\"\\n\"$SERVICE_PROXY_AGENT_DIRECTORY\"/service-proxy-agent/service-proxy-agent-bootstrap.sh\"\n }\n }]\n }]\n }\n gce-service-proxy: |\n {\n \"api-version\": \"0.2\",\n \"proxy-spec\": {\n \"proxy-port\": 15001,\n \"network\": \"my-network\",\n \"tracing\": \"ON\",\n \"access-log\": \"/var/log/envoy/access.log\"\n }\n \"service\": {\n \"serving-ports\": [80, 81]\n },\n \"labels\": {\n \"app_name\": \"bookserver_app\",\n \"app_version\": \"STABLE\"\n }\n }\n enable-guest-attributes: 'true'\n enable-osconfig: 'true'\n serviceAccount:\n email: ${default.email}\n scopes:\n - cloud-platform\n labels:\n gce-service-proxy: on\nvariables:\n default:\n fn::invoke:\n Function: gcp:compute:getDefaultServiceAccount\n Arguments: {}\n myImage:\n fn::invoke:\n Function: gcp:compute:getImage\n Arguments:\n family: debian-11\n project: debian-cloud\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n\n### Confidential Computing\n\nExample with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated.\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.serviceaccount.Account(\"default\", {\n accountId: \"my-custom-sa\",\n displayName: \"Custom SA for VM Instance\",\n});\nconst confidentialInstanceTemplate = new gcp.compute.InstanceTemplate(\"confidential_instance_template\", {\n networkInterfaces: [{\n accessConfigs: [{}],\n network: \"default\",\n }],\n name: \"my-confidential-instance-template\",\n region: \"us-central1\",\n machineType: \"n2d-standard-2\",\n minCpuPlatform: \"AMD Milan\",\n confidentialInstanceConfig: {\n enableConfidentialCompute: true,\n confidentialInstanceType: \"SEV\",\n },\n disks: [{\n sourceImage: \"ubuntu-os-cloud/ubuntu-2004-lts\",\n }],\n serviceAccount: {\n email: _default.email,\n scopes: [\"cloud-platform\"],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.serviceaccount.Account(\"default\",\n account_id=\"my-custom-sa\",\n display_name=\"Custom SA for VM Instance\")\nconfidential_instance_template = gcp.compute.InstanceTemplate(\"confidential_instance_template\",\n network_interfaces=[{\n \"access_configs\": [{}],\n \"network\": \"default\",\n }],\n name=\"my-confidential-instance-template\",\n region=\"us-central1\",\n machine_type=\"n2d-standard-2\",\n min_cpu_platform=\"AMD Milan\",\n confidential_instance_config={\n \"enable_confidential_compute\": True,\n \"confidential_instance_type\": \"SEV\",\n },\n disks=[{\n \"source_image\": \"ubuntu-os-cloud/ubuntu-2004-lts\",\n }],\n service_account={\n \"email\": default.email,\n \"scopes\": [\"cloud-platform\"],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.ServiceAccount.Account(\"default\", 
new()\n {\n AccountId = \"my-custom-sa\",\n DisplayName = \"Custom SA for VM Instance\",\n });\n\n var confidentialInstanceTemplate = new Gcp.Compute.InstanceTemplate(\"confidential_instance_template\", new()\n {\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs\n {\n AccessConfigs = new[]\n {\n null,\n },\n Network = \"default\",\n },\n },\n Name = \"my-confidential-instance-template\",\n Region = \"us-central1\",\n MachineType = \"n2d-standard-2\",\n MinCpuPlatform = \"AMD Milan\",\n ConfidentialInstanceConfig = new Gcp.Compute.Inputs.InstanceTemplateConfidentialInstanceConfigArgs\n {\n EnableConfidentialCompute = true,\n ConfidentialInstanceType = \"SEV\",\n },\n Disks = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n SourceImage = \"ubuntu-os-cloud/ubuntu-2004-lts\",\n },\n },\n ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs\n {\n Email = @default.Email,\n Scopes = new[]\n {\n \"cloud-platform\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := serviceaccount.NewAccount(ctx, \"default\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"my-custom-sa\"),\n\t\t\tDisplayName: pulumi.String(\"Custom SA for VM Instance\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstanceTemplate(ctx, \"confidential_instance_template\", \u0026compute.InstanceTemplateArgs{\n\t\t\tNetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceTemplateNetworkInterfaceArgs{\n\t\t\t\t\tAccessConfigs: compute.InstanceTemplateNetworkInterfaceAccessConfigArray{\n\t\t\t\t\t\tnil,\n\t\t\t\t\t},\n\t\t\t\t\tNetwork: pulumi.String(\"default\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tName: pulumi.String(\"my-confidential-instance-template\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tMachineType: pulumi.String(\"n2d-standard-2\"),\n\t\t\tMinCpuPlatform: pulumi.String(\"AMD Milan\"),\n\t\t\tConfidentialInstanceConfig: \u0026compute.InstanceTemplateConfidentialInstanceConfigArgs{\n\t\t\t\tEnableConfidentialCompute: pulumi.Bool(true),\n\t\t\t\tConfidentialInstanceType: pulumi.String(\"SEV\"),\n\t\t\t},\n\t\t\tDisks: compute.InstanceTemplateDiskArray{\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSourceImage: pulumi.String(\"ubuntu-os-cloud/ubuntu-2004-lts\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tServiceAccount: \u0026compute.InstanceTemplateServiceAccountArgs{\n\t\t\t\tEmail: _default.Email,\n\t\t\t\tScopes: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"cloud-platform\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.compute.InstanceTemplate;\nimport com.pulumi.gcp.compute.InstanceTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateNetworkInterfaceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateConfidentialInstanceConfigArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;\nimport 
com.pulumi.gcp.compute.inputs.InstanceTemplateServiceAccountArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Account(\"default\", AccountArgs.builder()\n .accountId(\"my-custom-sa\")\n .displayName(\"Custom SA for VM Instance\")\n .build());\n\n var confidentialInstanceTemplate = new InstanceTemplate(\"confidentialInstanceTemplate\", InstanceTemplateArgs.builder()\n .networkInterfaces(InstanceTemplateNetworkInterfaceArgs.builder()\n .accessConfigs()\n .network(\"default\")\n .build())\n .name(\"my-confidential-instance-template\")\n .region(\"us-central1\")\n .machineType(\"n2d-standard-2\")\n .minCpuPlatform(\"AMD Milan\")\n .confidentialInstanceConfig(InstanceTemplateConfidentialInstanceConfigArgs.builder()\n .enableConfidentialCompute(true)\n .confidentialInstanceType(\"SEV\")\n .build())\n .disks(InstanceTemplateDiskArgs.builder()\n .sourceImage(\"ubuntu-os-cloud/ubuntu-2004-lts\")\n .build())\n .serviceAccount(InstanceTemplateServiceAccountArgs.builder()\n .email(default_.email())\n .scopes(\"cloud-platform\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:serviceaccount:Account\n properties:\n accountId: my-custom-sa\n displayName: Custom SA for VM Instance\n confidentialInstanceTemplate:\n type: gcp:compute:InstanceTemplate\n name: confidential_instance_template\n properties:\n networkInterfaces:\n - accessConfigs:\n - {}\n network: default\n name: my-confidential-instance-template\n region: us-central1\n machineType: n2d-standard-2\n minCpuPlatform: AMD Milan\n confidentialInstanceConfig:\n enableConfidentialCompute: true\n confidentialInstanceType: SEV\n disks:\n - sourceImage: ubuntu-os-cloud/ubuntu-2004-lts\n serviceAccount:\n email: ${default.email}\n scopes:\n - cloud-platform\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Deploying the Latest Image\n\nA common way to use instance templates and managed instance groups is to deploy the\nlatest image in a family, usually the latest build of your application. There are two\nways to do this in the provider, and they have their pros and cons. The difference ends\nup being in how \"latest\" is interpreted. 
You can either deploy the latest image available\nwhen the provider runs, or you can have each instance check what the latest image is when\nit's being created, either as part of a scaling event or being rebuilt by the instance\ngroup manager.\n\nIf you're not sure, we recommend deploying the latest image available when the provider runs,\nbecause this means all the instances in your group will be based on the same image, always,\nand means that no upgrades or changes to your instances happen outside of a `pulumi up`.\nYou can achieve this by using the `gcp.compute.Image`\ndata source, which will retrieve the latest image on every `pulumi apply`, and will update\nthe template to use that specific image:\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myImage = gcp.compute.getImage({\n family: \"debian-11\",\n project: \"debian-cloud\",\n});\nconst instanceTemplate = new gcp.compute.InstanceTemplate(\"instance_template\", {\n namePrefix: \"instance-template-\",\n machineType: \"e2-medium\",\n region: \"us-central1\",\n disks: [{\n sourceImage: myImage.then(myImage =\u003e myImage.selfLink),\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_image = gcp.compute.get_image(family=\"debian-11\",\n project=\"debian-cloud\")\ninstance_template = gcp.compute.InstanceTemplate(\"instance_template\",\n name_prefix=\"instance-template-\",\n machine_type=\"e2-medium\",\n region=\"us-central1\",\n disks=[{\n \"source_image\": my_image.self_link,\n }])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myImage = Gcp.Compute.GetImage.Invoke(new()\n {\n Family = \"debian-11\",\n Project = \"debian-cloud\",\n });\n\n var instanceTemplate = new Gcp.Compute.InstanceTemplate(\"instance_template\", new()\n {\n NamePrefix = \"instance-template-\",\n MachineType = \"e2-medium\",\n Region = \"us-central1\",\n Disks = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n SourceImage = myImage.Apply(getImageResult =\u003e getImageResult.SelfLink),\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tmyImage, err := compute.LookupImage(ctx, \u0026compute.LookupImageArgs{\n\t\t\tFamily: pulumi.StringRef(\"debian-11\"),\n\t\t\tProject: pulumi.StringRef(\"debian-cloud\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewInstanceTemplate(ctx, \"instance_template\", \u0026compute.InstanceTemplateArgs{\n\t\t\tNamePrefix: pulumi.String(\"instance-template-\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tDisks: compute.InstanceTemplateDiskArray{\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSourceImage: pulumi.String(myImage.SelfLink),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetImageArgs;\nimport com.pulumi.gcp.compute.InstanceTemplate;\nimport com.pulumi.gcp.compute.InstanceTemplateArgs;\nimport 
com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var myImage = ComputeFunctions.getImage(GetImageArgs.builder()\n .family(\"debian-11\")\n .project(\"debian-cloud\")\n .build());\n\n var instanceTemplate = new InstanceTemplate(\"instanceTemplate\", InstanceTemplateArgs.builder()\n .namePrefix(\"instance-template-\")\n .machineType(\"e2-medium\")\n .region(\"us-central1\")\n .disks(InstanceTemplateDiskArgs.builder()\n .sourceImage(myImage.applyValue(getImageResult -\u003e getImageResult.selfLink()))\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instanceTemplate:\n type: gcp:compute:InstanceTemplate\n name: instance_template\n properties:\n namePrefix: instance-template-\n machineType: e2-medium\n region: us-central1\n disks:\n - sourceImage: ${myImage.selfLink}\nvariables:\n myImage:\n fn::invoke:\n Function: gcp:compute:getImage\n Arguments:\n family: debian-11\n project: debian-cloud\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nTo have instances update to the latest on every scaling event or instance re-creation,\nuse the family as the image for the disk, and it will use GCP's default behavior, setting\nthe image for the template to the family:\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst instanceTemplate = new gcp.compute.InstanceTemplate(\"instance_template\", {\n namePrefix: \"instance-template-\",\n machineType: \"e2-medium\",\n region: \"us-central1\",\n disks: [{\n sourceImage: \"debian-cloud/debian-11\",\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ninstance_template = gcp.compute.InstanceTemplate(\"instance_template\",\n name_prefix=\"instance-template-\",\n machine_type=\"e2-medium\",\n region=\"us-central1\",\n disks=[{\n \"source_image\": \"debian-cloud/debian-11\",\n }])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var instanceTemplate = new Gcp.Compute.InstanceTemplate(\"instance_template\", new()\n {\n NamePrefix = \"instance-template-\",\n MachineType = \"e2-medium\",\n Region = \"us-central1\",\n Disks = new[]\n {\n new Gcp.Compute.Inputs.InstanceTemplateDiskArgs\n {\n SourceImage = \"debian-cloud/debian-11\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewInstanceTemplate(ctx, \"instance_template\", \u0026compute.InstanceTemplateArgs{\n\t\t\tNamePrefix: pulumi.String(\"instance-template-\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tDisks: compute.InstanceTemplateDiskArray{\n\t\t\t\t\u0026compute.InstanceTemplateDiskArgs{\n\t\t\t\t\tSourceImage: pulumi.String(\"debian-cloud/debian-11\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport 
com.pulumi.gcp.compute.InstanceTemplate;\nimport com.pulumi.gcp.compute.InstanceTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var instanceTemplate = new InstanceTemplate(\"instanceTemplate\", InstanceTemplateArgs.builder()\n .namePrefix(\"instance-template-\")\n .machineType(\"e2-medium\")\n .region(\"us-central1\")\n .disks(InstanceTemplateDiskArgs.builder()\n .sourceImage(\"debian-cloud/debian-11\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instanceTemplate:\n type: gcp:compute:InstanceTemplate\n name: instance_template\n properties:\n namePrefix: instance-template-\n machineType: e2-medium\n region: us-central1\n disks:\n - sourceImage: debian-cloud/debian-11\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nInstance templates can be imported using any of these accepted formats:\n\n* `projects/{{project}}/global/instanceTemplates/{{name}}`\n\n* `{{project}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, instance templates can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:compute/instanceTemplate:InstanceTemplate default projects/{{project}}/global/instanceTemplates/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/instanceTemplate:InstanceTemplate default {{project}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/instanceTemplate:InstanceTemplate default {{name}}\n```\n\n", "properties": { "advancedMachineFeatures": { "$ref": "#/types/gcp:compute/InstanceTemplateAdvancedMachineFeatures:InstanceTemplateAdvancedMachineFeatures", @@ -152886,7 +154431,7 @@ "items": { "type": "string" }, - "description": "interconnects.list of features requested for this Interconnect connection. Options: MACSEC (\nIf specified then the connection is created on MACsec capable hardware ports. If not\nspecified, the default value is false, which allocates non-MACsec capable ports first if\navailable).\nEach value may be one of: `MACSEC`.\n" + "description": "interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC (\nIf specified then the connection is created on MACsec capable hardware ports. If not\nspecified, the default value is false, which allocates non-MACsec capable ports first if\navailable). Note that MACSEC is still technically allowed for compatibility reasons, but it\ndoes not work with the API, and will be removed in an upcoming major version.\nEach value may be one of: `MACSEC`, `IF_MACSEC`.\n" }, "requestedLinkCount": { "type": "integer", @@ -152998,7 +154543,7 @@ "items": { "type": "string" }, - "description": "interconnects.list of features requested for this Interconnect connection. Options: MACSEC (\nIf specified then the connection is created on MACsec capable hardware ports. If not\nspecified, the default value is false, which allocates non-MACsec capable ports first if\navailable).\nEach value may be one of: `MACSEC`.\n", + "description": "interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC (\nIf specified then the connection is created on MACsec capable hardware ports. If not\nspecified, the default value is false, which allocates non-MACsec capable ports first if\navailable). 
Note that MACSEC is still technically allowed for compatibility reasons, but it\ndoes not work with the API, and will be removed in an upcoming major version.\nEach value may be one of: `MACSEC`, `IF_MACSEC`.\n", "willReplaceOnChanges": true }, "requestedLinkCount": { @@ -153162,7 +154707,7 @@ "items": { "type": "string" }, - "description": "interconnects.list of features requested for this Interconnect connection. Options: MACSEC (\nIf specified then the connection is created on MACsec capable hardware ports. If not\nspecified, the default value is false, which allocates non-MACsec capable ports first if\navailable).\nEach value may be one of: `MACSEC`.\n", + "description": "interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC (\nIf specified then the connection is created on MACsec capable hardware ports. If not\nspecified, the default value is false, which allocates non-MACsec capable ports first if\navailable). Note that MACSEC is still technically allowed for compatibility reasons, but it\ndoes not work with the API, and will be removed in an upcoming major version.\nEach value may be one of: `MACSEC`, `IF_MACSEC`.\n", "willReplaceOnChanges": true }, "requestedLinkCount": { @@ -155992,8 +157537,15 @@ } }, "gcp:compute/nodeTemplate:NodeTemplate": { - "description": "Represents a NodeTemplate resource. Node templates specify properties\nfor creating sole-tenant nodes, such as node type, vCPU and memory\nrequirements, node affinity labels, and region.\n\n\nTo get more information about NodeTemplate, see:\n\n* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/nodeTemplates)\n* How-to Guides\n * [Sole-Tenant Nodes](https://cloud.google.com/compute/docs/nodes/)\n\n## Example Usage\n\n### Node Template Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst template = new gcp.compute.NodeTemplate(\"template\", {\n name: \"soletenant-tmpl\",\n region: \"us-central1\",\n nodeType: \"n1-node-96-624\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntemplate = gcp.compute.NodeTemplate(\"template\",\n name=\"soletenant-tmpl\",\n region=\"us-central1\",\n node_type=\"n1-node-96-624\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var template = new Gcp.Compute.NodeTemplate(\"template\", new()\n {\n Name = \"soletenant-tmpl\",\n Region = \"us-central1\",\n NodeType = \"n1-node-96-624\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewNodeTemplate(ctx, \"template\", \u0026compute.NodeTemplateArgs{\n\t\t\tName: pulumi.String(\"soletenant-tmpl\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNodeType: pulumi.String(\"n1-node-96-624\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.NodeTemplate;\nimport com.pulumi.gcp.compute.NodeTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport 
java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var template = new NodeTemplate(\"template\", NodeTemplateArgs.builder()\n .name(\"soletenant-tmpl\")\n .region(\"us-central1\")\n .nodeType(\"n1-node-96-624\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n template:\n type: gcp:compute:NodeTemplate\n properties:\n name: soletenant-tmpl\n region: us-central1\n nodeType: n1-node-96-624\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Node Template Server Binding\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst central1a = gcp.compute.getNodeTypes({\n zone: \"us-central1-a\",\n});\nconst template = new gcp.compute.NodeTemplate(\"template\", {\n name: \"soletenant-with-licenses\",\n region: \"us-central1\",\n nodeType: \"n1-node-96-624\",\n nodeAffinityLabels: {\n foo: \"baz\",\n },\n serverBinding: {\n type: \"RESTART_NODE_ON_MINIMAL_SERVERS\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncentral1a = gcp.compute.get_node_types(zone=\"us-central1-a\")\ntemplate = gcp.compute.NodeTemplate(\"template\",\n name=\"soletenant-with-licenses\",\n region=\"us-central1\",\n node_type=\"n1-node-96-624\",\n node_affinity_labels={\n \"foo\": \"baz\",\n },\n server_binding={\n \"type\": \"RESTART_NODE_ON_MINIMAL_SERVERS\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var central1a = Gcp.Compute.GetNodeTypes.Invoke(new()\n {\n Zone = \"us-central1-a\",\n });\n\n var template = new Gcp.Compute.NodeTemplate(\"template\", new()\n {\n Name = \"soletenant-with-licenses\",\n Region = \"us-central1\",\n NodeType = \"n1-node-96-624\",\n NodeAffinityLabels = \n {\n { \"foo\", \"baz\" },\n },\n ServerBinding = new Gcp.Compute.Inputs.NodeTemplateServerBindingArgs\n {\n Type = \"RESTART_NODE_ON_MINIMAL_SERVERS\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.GetNodeTypes(ctx, \u0026compute.GetNodeTypesArgs{\n\t\t\tZone: pulumi.StringRef(\"us-central1-a\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewNodeTemplate(ctx, \"template\", \u0026compute.NodeTemplateArgs{\n\t\t\tName: pulumi.String(\"soletenant-with-licenses\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNodeType: pulumi.String(\"n1-node-96-624\"),\n\t\t\tNodeAffinityLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"baz\"),\n\t\t\t},\n\t\t\tServerBinding: \u0026compute.NodeTemplateServerBindingArgs{\n\t\t\t\tType: pulumi.String(\"RESTART_NODE_ON_MINIMAL_SERVERS\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;\nimport com.pulumi.gcp.compute.NodeTemplate;\nimport com.pulumi.gcp.compute.NodeTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.NodeTemplateServerBindingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport 
java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()\n .zone(\"us-central1-a\")\n .build());\n\n var template = new NodeTemplate(\"template\", NodeTemplateArgs.builder()\n .name(\"soletenant-with-licenses\")\n .region(\"us-central1\")\n .nodeType(\"n1-node-96-624\")\n .nodeAffinityLabels(Map.of(\"foo\", \"baz\"))\n .serverBinding(NodeTemplateServerBindingArgs.builder()\n .type(\"RESTART_NODE_ON_MINIMAL_SERVERS\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n template:\n type: gcp:compute:NodeTemplate\n properties:\n name: soletenant-with-licenses\n region: us-central1\n nodeType: n1-node-96-624\n nodeAffinityLabels:\n foo: baz\n serverBinding:\n type: RESTART_NODE_ON_MINIMAL_SERVERS\nvariables:\n central1a:\n fn::invoke:\n Function: gcp:compute:getNodeTypes\n Arguments:\n zone: us-central1-a\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nNodeTemplate can be imported using any of these accepted formats:\n\n* `projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}`\n\n* `{{project}}/{{region}}/{{name}}`\n\n* `{{region}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, NodeTemplate can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:compute/nodeTemplate:NodeTemplate default projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/nodeTemplate:NodeTemplate default {{project}}/{{region}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/nodeTemplate:NodeTemplate default {{region}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/nodeTemplate:NodeTemplate default {{name}}\n```\n\n", + "description": "Represents a NodeTemplate resource. 
Node templates specify properties\nfor creating sole-tenant nodes, such as node type, vCPU and memory\nrequirements, node affinity labels, and region.\n\n\nTo get more information about NodeTemplate, see:\n\n* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/nodeTemplates)\n* How-to Guides\n * [Sole-Tenant Nodes](https://cloud.google.com/compute/docs/nodes/)\n\n## Example Usage\n\n### Node Template Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst template = new gcp.compute.NodeTemplate(\"template\", {\n name: \"soletenant-tmpl\",\n region: \"us-central1\",\n nodeType: \"n1-node-96-624\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntemplate = gcp.compute.NodeTemplate(\"template\",\n name=\"soletenant-tmpl\",\n region=\"us-central1\",\n node_type=\"n1-node-96-624\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var template = new Gcp.Compute.NodeTemplate(\"template\", new()\n {\n Name = \"soletenant-tmpl\",\n Region = \"us-central1\",\n NodeType = \"n1-node-96-624\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.NewNodeTemplate(ctx, \"template\", \u0026compute.NodeTemplateArgs{\n\t\t\tName: pulumi.String(\"soletenant-tmpl\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNodeType: pulumi.String(\"n1-node-96-624\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.NodeTemplate;\nimport com.pulumi.gcp.compute.NodeTemplateArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var template = new NodeTemplate(\"template\", NodeTemplateArgs.builder()\n .name(\"soletenant-tmpl\")\n .region(\"us-central1\")\n .nodeType(\"n1-node-96-624\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n template:\n type: gcp:compute:NodeTemplate\n properties:\n name: soletenant-tmpl\n region: us-central1\n nodeType: n1-node-96-624\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Node Template Server Binding\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst central1a = gcp.compute.getNodeTypes({\n zone: \"us-central1-a\",\n});\nconst template = new gcp.compute.NodeTemplate(\"template\", {\n name: \"soletenant-with-licenses\",\n region: \"us-central1\",\n nodeType: \"n1-node-96-624\",\n nodeAffinityLabels: {\n foo: \"baz\",\n },\n serverBinding: {\n type: \"RESTART_NODE_ON_MINIMAL_SERVERS\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncentral1a = gcp.compute.get_node_types(zone=\"us-central1-a\")\ntemplate = gcp.compute.NodeTemplate(\"template\",\n name=\"soletenant-with-licenses\",\n region=\"us-central1\",\n node_type=\"n1-node-96-624\",\n node_affinity_labels={\n \"foo\": 
\"baz\",\n },\n server_binding={\n \"type\": \"RESTART_NODE_ON_MINIMAL_SERVERS\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var central1a = Gcp.Compute.GetNodeTypes.Invoke(new()\n {\n Zone = \"us-central1-a\",\n });\n\n var template = new Gcp.Compute.NodeTemplate(\"template\", new()\n {\n Name = \"soletenant-with-licenses\",\n Region = \"us-central1\",\n NodeType = \"n1-node-96-624\",\n NodeAffinityLabels = \n {\n { \"foo\", \"baz\" },\n },\n ServerBinding = new Gcp.Compute.Inputs.NodeTemplateServerBindingArgs\n {\n Type = \"RESTART_NODE_ON_MINIMAL_SERVERS\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.GetNodeTypes(ctx, \u0026compute.GetNodeTypesArgs{\n\t\t\tZone: pulumi.StringRef(\"us-central1-a\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewNodeTemplate(ctx, \"template\", \u0026compute.NodeTemplateArgs{\n\t\t\tName: pulumi.String(\"soletenant-with-licenses\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNodeType: pulumi.String(\"n1-node-96-624\"),\n\t\t\tNodeAffinityLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"baz\"),\n\t\t\t},\n\t\t\tServerBinding: \u0026compute.NodeTemplateServerBindingArgs{\n\t\t\t\tType: pulumi.String(\"RESTART_NODE_ON_MINIMAL_SERVERS\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;\nimport com.pulumi.gcp.compute.NodeTemplate;\nimport com.pulumi.gcp.compute.NodeTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.NodeTemplateServerBindingArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()\n .zone(\"us-central1-a\")\n .build());\n\n var template = new NodeTemplate(\"template\", NodeTemplateArgs.builder()\n .name(\"soletenant-with-licenses\")\n .region(\"us-central1\")\n .nodeType(\"n1-node-96-624\")\n .nodeAffinityLabels(Map.of(\"foo\", \"baz\"))\n .serverBinding(NodeTemplateServerBindingArgs.builder()\n .type(\"RESTART_NODE_ON_MINIMAL_SERVERS\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n template:\n type: gcp:compute:NodeTemplate\n properties:\n name: soletenant-with-licenses\n region: us-central1\n nodeType: n1-node-96-624\n nodeAffinityLabels:\n foo: baz\n serverBinding:\n type: RESTART_NODE_ON_MINIMAL_SERVERS\nvariables:\n central1a:\n fn::invoke:\n Function: gcp:compute:getNodeTypes\n Arguments:\n zone: us-central1-a\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Node Template Accelerators\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst central1a = gcp.compute.getNodeTypes({\n zone: \"us-central1-a\",\n});\nconst template = new 
gcp.compute.NodeTemplate(\"template\", {\n name: \"soletenant-with-accelerators\",\n region: \"us-central1\",\n nodeType: \"n1-node-96-624\",\n accelerators: [{\n acceleratorType: \"nvidia-tesla-t4\",\n acceleratorCount: 4,\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncentral1a = gcp.compute.get_node_types(zone=\"us-central1-a\")\ntemplate = gcp.compute.NodeTemplate(\"template\",\n name=\"soletenant-with-accelerators\",\n region=\"us-central1\",\n node_type=\"n1-node-96-624\",\n accelerators=[{\n \"accelerator_type\": \"nvidia-tesla-t4\",\n \"accelerator_count\": 4,\n }])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var central1a = Gcp.Compute.GetNodeTypes.Invoke(new()\n {\n Zone = \"us-central1-a\",\n });\n\n var template = new Gcp.Compute.NodeTemplate(\"template\", new()\n {\n Name = \"soletenant-with-accelerators\",\n Region = \"us-central1\",\n NodeType = \"n1-node-96-624\",\n Accelerators = new[]\n {\n new Gcp.Compute.Inputs.NodeTemplateAcceleratorArgs\n {\n AcceleratorType = \"nvidia-tesla-t4\",\n AcceleratorCount = 4,\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := compute.GetNodeTypes(ctx, \u0026compute.GetNodeTypesArgs{\n\t\t\tZone: pulumi.StringRef(\"us-central1-a\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewNodeTemplate(ctx, \"template\", \u0026compute.NodeTemplateArgs{\n\t\t\tName: pulumi.String(\"soletenant-with-accelerators\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNodeType: pulumi.String(\"n1-node-96-624\"),\n\t\t\tAccelerators: compute.NodeTemplateAcceleratorArray{\n\t\t\t\t\u0026compute.NodeTemplateAcceleratorArgs{\n\t\t\t\t\tAcceleratorType: pulumi.String(\"nvidia-tesla-t4\"),\n\t\t\t\t\tAcceleratorCount: pulumi.Int(4),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;\nimport com.pulumi.gcp.compute.NodeTemplate;\nimport com.pulumi.gcp.compute.NodeTemplateArgs;\nimport com.pulumi.gcp.compute.inputs.NodeTemplateAcceleratorArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()\n .zone(\"us-central1-a\")\n .build());\n\n var template = new NodeTemplate(\"template\", NodeTemplateArgs.builder()\n .name(\"soletenant-with-accelerators\")\n .region(\"us-central1\")\n .nodeType(\"n1-node-96-624\")\n .accelerators(NodeTemplateAcceleratorArgs.builder()\n .acceleratorType(\"nvidia-tesla-t4\")\n .acceleratorCount(4)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n template:\n type: gcp:compute:NodeTemplate\n properties:\n name: soletenant-with-accelerators\n region: us-central1\n nodeType: n1-node-96-624\n accelerators:\n - acceleratorType: nvidia-tesla-t4\n 
acceleratorCount: 4\nvariables:\n central1a:\n fn::invoke:\n Function: gcp:compute:getNodeTypes\n Arguments:\n zone: us-central1-a\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nNodeTemplate can be imported using any of these accepted formats:\n\n* `projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}`\n\n* `{{project}}/{{region}}/{{name}}`\n\n* `{{region}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, NodeTemplate can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:compute/nodeTemplate:NodeTemplate default projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/nodeTemplate:NodeTemplate default {{project}}/{{region}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/nodeTemplate:NodeTemplate default {{region}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:compute/nodeTemplate:NodeTemplate default {{name}}\n```\n\n", "properties": { + "accelerators": { + "type": "array", + "items": { + "$ref": "#/types/gcp:compute/NodeTemplateAccelerator:NodeTemplateAccelerator" + }, + "description": "List of the type and count of accelerator cards attached to the\nnode template\nStructure is documented below.\n" + }, "cpuOvercommitType": { "type": "string", "description": "CPU overcommit.\nDefault value is `NONE`.\nPossible values are: `ENABLED`, `NONE`.\n" @@ -156051,6 +157603,14 @@ "serverBinding" ], "inputProperties": { + "accelerators": { + "type": "array", + "items": { + "$ref": "#/types/gcp:compute/NodeTemplateAccelerator:NodeTemplateAccelerator" + }, + "description": "List of the type and count of accelerator cards attached to the\nnode template\nStructure is documented below.\n", + "willReplaceOnChanges": true + }, "cpuOvercommitType": { "type": "string", "description": "CPU overcommit.\nDefault value is `NONE`.\nPossible values are: `ENABLED`, `NONE`.\n", @@ -156103,6 +157663,14 @@ "stateInputs": { "description": "Input properties used for looking up and filtering NodeTemplate resources.\n", "properties": { + "accelerators": { + "type": "array", + "items": { + "$ref": "#/types/gcp:compute/NodeTemplateAccelerator:NodeTemplateAccelerator" + }, + "description": "List of the type and count of accelerator cards attached to the\nnode template\nStructure is documented below.\n", + "willReplaceOnChanges": true + }, "cpuOvercommitType": { "type": "string", "description": "CPU overcommit.\nDefault value is `NONE`.\nPossible values are: `ENABLED`, `NONE`.\n", @@ -167079,7 +168647,7 @@ }, "serverTlsPolicy": { "type": "string", - "description": "A URL referring to a networksecurity.ServerTlsPolicy\nresource that describes how the proxy should authenticate inbound\ntraffic. serverTlsPolicy only applies to a global TargetHttpsProxy\nattached to globalForwardingRules with the loadBalancingScheme\nset to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED.\nFor details which ServerTlsPolicy resources are accepted with\nINTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED\nloadBalancingScheme consult ServerTlsPolicy documentation.\nIf left blank, communications are not encrypted.\n" + "description": "A URL referring to a networksecurity.ServerTlsPolicy\nresource that describes how the proxy should authenticate inbound\ntraffic. 
serverTlsPolicy only applies to a global TargetHttpsProxy\nattached to globalForwardingRules with the loadBalancingScheme\nset to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED.\nFor details which ServerTlsPolicy resources are accepted with\nINTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED\nloadBalancingScheme consult ServerTlsPolicy documentation.\nIf left blank, communications are not encrypted.\nIf you remove this field from your configuration at the same time as\ndeleting or recreating a referenced ServerTlsPolicy resource, you will\nreceive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy\nwithin the ServerTlsPolicy resource to avoid this.\n" }, "sslCertificates": { "type": "array", @@ -167154,7 +168722,7 @@ }, "serverTlsPolicy": { "type": "string", - "description": "A URL referring to a networksecurity.ServerTlsPolicy\nresource that describes how the proxy should authenticate inbound\ntraffic. serverTlsPolicy only applies to a global TargetHttpsProxy\nattached to globalForwardingRules with the loadBalancingScheme\nset to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED.\nFor details which ServerTlsPolicy resources are accepted with\nINTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED\nloadBalancingScheme consult ServerTlsPolicy documentation.\nIf left blank, communications are not encrypted.\n" + "description": "A URL referring to a networksecurity.ServerTlsPolicy\nresource that describes how the proxy should authenticate inbound\ntraffic. serverTlsPolicy only applies to a global TargetHttpsProxy\nattached to globalForwardingRules with the loadBalancingScheme\nset to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED.\nFor details which ServerTlsPolicy resources are accepted with\nINTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED\nloadBalancingScheme consult ServerTlsPolicy documentation.\nIf left blank, communications are not encrypted.\nIf you remove this field from your configuration at the same time as\ndeleting or recreating a referenced ServerTlsPolicy resource, you will\nreceive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy\nwithin the ServerTlsPolicy resource to avoid this.\n" }, "sslCertificates": { "type": "array", @@ -167237,7 +168805,7 @@ }, "serverTlsPolicy": { "type": "string", - "description": "A URL referring to a networksecurity.ServerTlsPolicy\nresource that describes how the proxy should authenticate inbound\ntraffic. serverTlsPolicy only applies to a global TargetHttpsProxy\nattached to globalForwardingRules with the loadBalancingScheme\nset to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED.\nFor details which ServerTlsPolicy resources are accepted with\nINTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED\nloadBalancingScheme consult ServerTlsPolicy documentation.\nIf left blank, communications are not encrypted.\n" + "description": "A URL referring to a networksecurity.ServerTlsPolicy\nresource that describes how the proxy should authenticate inbound\ntraffic. 
serverTlsPolicy only applies to a global TargetHttpsProxy\nattached to globalForwardingRules with the loadBalancingScheme\nset to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED.\nFor details which ServerTlsPolicy resources are accepted with\nINTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED\nloadBalancingScheme consult ServerTlsPolicy documentation.\nIf left blank, communications are not encrypted.\nIf you remove this field from your configuration at the same time as\ndeleting or recreating a referenced ServerTlsPolicy resource, you will\nreceive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy\nwithin the ServerTlsPolicy resource to avoid this.\n" }, "sslCertificates": { "type": "array", @@ -168612,7 +170180,7 @@ }, "deletionPolicy": { "type": "string", - "description": "Policy to determine what flags to send on delete.\n" + "description": "Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS\n" }, "description": { "type": "string", @@ -168737,7 +170305,7 @@ }, "deletionPolicy": { "type": "string", - "description": "Policy to determine what flags to send on delete.\n" + "description": "Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS\n" }, "description": { "type": "string", @@ -168822,7 +170390,7 @@ }, "deletionPolicy": { "type": "string", - "description": "Policy to determine what flags to send on delete.\n" + "description": "Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS\n" }, "description": { "type": "string", @@ -172156,7 +173724,7 @@ } }, "gcp:databasemigrationservice/connectionProfile:ConnectionProfile": { - "description": "A connection profile definition.\n\n\nTo get more information about ConnectionProfile, see:\n\n* [API documentation](https://cloud.google.com/database-migration/docs/reference/rest/v1/projects.locations.connectionProfiles/create)\n* How-to Guides\n * [Database Migration](https://cloud.google.com/database-migration/docs/)\n\n\n\n## Example Usage\n\n### Database Migration Service Connection Profile Cloudsql\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst cloudsqldb = new gcp.sql.DatabaseInstance(\"cloudsqldb\", {\n name: \"my-database\",\n databaseVersion: \"MYSQL_5_7\",\n settings: {\n tier: \"db-n1-standard-1\",\n deletionProtectionEnabled: false,\n },\n deletionProtection: false,\n});\nconst sqlClientCert = new gcp.sql.SslCert(\"sql_client_cert\", {\n commonName: \"my-cert\",\n instance: cloudsqldb.name,\n}, {\n dependsOn: [cloudsqldb],\n});\nconst sqldbUser = new gcp.sql.User(\"sqldb_user\", {\n name: \"my-username\",\n instance: cloudsqldb.name,\n password: \"my-password\",\n}, {\n dependsOn: [sqlClientCert],\n});\nconst cloudsqlprofile = new gcp.databasemigrationservice.ConnectionProfile(\"cloudsqlprofile\", {\n location: \"us-central1\",\n connectionProfileId: \"my-fromprofileid\",\n displayName: \"my-fromprofileid_display\",\n labels: {\n foo: \"bar\",\n },\n mysql: {\n host: cloudsqldb.ipAddresses.apply(ipAddresses =\u003e ipAddresses[0].ipAddress),\n port: 3306,\n username: sqldbUser.name,\n password: sqldbUser.password,\n ssl: {\n clientKey: sqlClientCert.privateKey,\n clientCertificate: sqlClientCert.cert,\n caCertificate: sqlClientCert.serverCaCert,\n },\n cloudSqlId: \"my-database\",\n },\n}, {\n dependsOn: 
[sqldbUser],\n});\nconst cloudsqlprofileDestination = new gcp.databasemigrationservice.ConnectionProfile(\"cloudsqlprofile_destination\", {\n location: \"us-central1\",\n connectionProfileId: \"my-toprofileid\",\n displayName: \"my-toprofileid_displayname\",\n labels: {\n foo: \"bar\",\n },\n cloudsql: {\n settings: {\n databaseVersion: \"MYSQL_5_7\",\n userLabels: {\n cloudfoo: \"cloudbar\",\n },\n tier: \"db-n1-standard-1\",\n edition: \"ENTERPRISE\",\n storageAutoResizeLimit: \"0\",\n activationPolicy: \"ALWAYS\",\n ipConfig: {\n enableIpv4: true,\n requireSsl: true,\n },\n autoStorageIncrease: true,\n dataDiskType: \"PD_HDD\",\n dataDiskSizeGb: \"11\",\n zone: \"us-central1-b\",\n sourceId: project.then(project =\u003e `projects/${project.projectId}/locations/us-central1/connectionProfiles/my-fromprofileid`),\n rootPassword: \"testpasscloudsql\",\n },\n },\n}, {\n dependsOn: [cloudsqlprofile],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\ncloudsqldb = gcp.sql.DatabaseInstance(\"cloudsqldb\",\n name=\"my-database\",\n database_version=\"MYSQL_5_7\",\n settings={\n \"tier\": \"db-n1-standard-1\",\n \"deletion_protection_enabled\": False,\n },\n deletion_protection=False)\nsql_client_cert = gcp.sql.SslCert(\"sql_client_cert\",\n common_name=\"my-cert\",\n instance=cloudsqldb.name,\n opts = pulumi.ResourceOptions(depends_on=[cloudsqldb]))\nsqldb_user = gcp.sql.User(\"sqldb_user\",\n name=\"my-username\",\n instance=cloudsqldb.name,\n password=\"my-password\",\n opts = pulumi.ResourceOptions(depends_on=[sql_client_cert]))\ncloudsqlprofile = gcp.databasemigrationservice.ConnectionProfile(\"cloudsqlprofile\",\n location=\"us-central1\",\n connection_profile_id=\"my-fromprofileid\",\n display_name=\"my-fromprofileid_display\",\n labels={\n \"foo\": \"bar\",\n },\n mysql={\n \"host\": cloudsqldb.ip_addresses[0].ip_address,\n \"port\": 3306,\n \"username\": sqldb_user.name,\n \"password\": sqldb_user.password,\n \"ssl\": {\n \"client_key\": sql_client_cert.private_key,\n \"client_certificate\": sql_client_cert.cert,\n \"ca_certificate\": sql_client_cert.server_ca_cert,\n },\n \"cloud_sql_id\": \"my-database\",\n },\n opts = pulumi.ResourceOptions(depends_on=[sqldb_user]))\ncloudsqlprofile_destination = gcp.databasemigrationservice.ConnectionProfile(\"cloudsqlprofile_destination\",\n location=\"us-central1\",\n connection_profile_id=\"my-toprofileid\",\n display_name=\"my-toprofileid_displayname\",\n labels={\n \"foo\": \"bar\",\n },\n cloudsql={\n \"settings\": {\n \"database_version\": \"MYSQL_5_7\",\n \"user_labels\": {\n \"cloudfoo\": \"cloudbar\",\n },\n \"tier\": \"db-n1-standard-1\",\n \"edition\": \"ENTERPRISE\",\n \"storage_auto_resize_limit\": \"0\",\n \"activation_policy\": \"ALWAYS\",\n \"ip_config\": {\n \"enable_ipv4\": True,\n \"require_ssl\": True,\n },\n \"auto_storage_increase\": True,\n \"data_disk_type\": \"PD_HDD\",\n \"data_disk_size_gb\": \"11\",\n \"zone\": \"us-central1-b\",\n \"source_id\": f\"projects/{project.project_id}/locations/us-central1/connectionProfiles/my-fromprofileid\",\n \"root_password\": \"testpasscloudsql\",\n },\n },\n opts = pulumi.ResourceOptions(depends_on=[cloudsqlprofile]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var cloudsqldb = new Gcp.Sql.DatabaseInstance(\"cloudsqldb\", new()\n {\n Name = 
\"my-database\",\n DatabaseVersion = \"MYSQL_5_7\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-n1-standard-1\",\n DeletionProtectionEnabled = false,\n },\n DeletionProtection = false,\n });\n\n var sqlClientCert = new Gcp.Sql.SslCert(\"sql_client_cert\", new()\n {\n CommonName = \"my-cert\",\n Instance = cloudsqldb.Name,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n cloudsqldb,\n },\n });\n\n var sqldbUser = new Gcp.Sql.User(\"sqldb_user\", new()\n {\n Name = \"my-username\",\n Instance = cloudsqldb.Name,\n Password = \"my-password\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n sqlClientCert,\n },\n });\n\n var cloudsqlprofile = new Gcp.DatabaseMigrationService.ConnectionProfile(\"cloudsqlprofile\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-fromprofileid\",\n DisplayName = \"my-fromprofileid_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Mysql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileMysqlArgs\n {\n Host = cloudsqldb.IpAddresses.Apply(ipAddresses =\u003e ipAddresses[0].IpAddress),\n Port = 3306,\n Username = sqldbUser.Name,\n Password = sqldbUser.Password,\n Ssl = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileMysqlSslArgs\n {\n ClientKey = sqlClientCert.PrivateKey,\n ClientCertificate = sqlClientCert.Cert,\n CaCertificate = sqlClientCert.ServerCaCert,\n },\n CloudSqlId = \"my-database\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n sqldbUser,\n },\n });\n\n var cloudsqlprofileDestination = new Gcp.DatabaseMigrationService.ConnectionProfile(\"cloudsqlprofile_destination\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-toprofileid\",\n DisplayName = \"my-toprofileid_displayname\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Cloudsql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileCloudsqlArgs\n {\n Settings = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileCloudsqlSettingsArgs\n {\n DatabaseVersion = \"MYSQL_5_7\",\n UserLabels = \n {\n { \"cloudfoo\", \"cloudbar\" },\n },\n Tier = \"db-n1-standard-1\",\n Edition = \"ENTERPRISE\",\n StorageAutoResizeLimit = \"0\",\n ActivationPolicy = \"ALWAYS\",\n IpConfig = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileCloudsqlSettingsIpConfigArgs\n {\n EnableIpv4 = true,\n RequireSsl = true,\n },\n AutoStorageIncrease = true,\n DataDiskType = \"PD_HDD\",\n DataDiskSizeGb = \"11\",\n Zone = \"us-central1-b\",\n SourceId = $\"projects/{project.Apply(getProjectResult =\u003e getProjectResult.ProjectId)}/locations/us-central1/connectionProfiles/my-fromprofileid\",\n RootPassword = \"testpasscloudsql\",\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n cloudsqlprofile,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcloudsqldb, err := sql.NewDatabaseInstance(ctx, \"cloudsqldb\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-database\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_5_7\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: 
pulumi.String(\"db-n1-standard-1\"),\n\t\t\t\tDeletionProtectionEnabled: pulumi.Bool(false),\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsqlClientCert, err := sql.NewSslCert(ctx, \"sql_client_cert\", \u0026sql.SslCertArgs{\n\t\t\tCommonName: pulumi.String(\"my-cert\"),\n\t\t\tInstance: cloudsqldb.Name,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tcloudsqldb,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsqldbUser, err := sql.NewUser(ctx, \"sqldb_user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"my-username\"),\n\t\t\tInstance: cloudsqldb.Name,\n\t\t\tPassword: pulumi.String(\"my-password\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsqlClientCert,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcloudsqlprofile, err := databasemigrationservice.NewConnectionProfile(ctx, \"cloudsqlprofile\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-fromprofileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-fromprofileid_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tMysql: \u0026databasemigrationservice.ConnectionProfileMysqlArgs{\n\t\t\t\tHost: cloudsqldb.IpAddresses.ApplyT(func(ipAddresses []sql.DatabaseInstanceIpAddress) (*string, error) {\n\t\t\t\t\treturn \u0026ipAddresses[0].IpAddress, nil\n\t\t\t\t}).(pulumi.StringPtrOutput),\n\t\t\t\tPort: pulumi.Int(3306),\n\t\t\t\tUsername: sqldbUser.Name,\n\t\t\t\tPassword: sqldbUser.Password,\n\t\t\t\tSsl: \u0026databasemigrationservice.ConnectionProfileMysqlSslArgs{\n\t\t\t\t\tClientKey: sqlClientCert.PrivateKey,\n\t\t\t\t\tClientCertificate: sqlClientCert.Cert,\n\t\t\t\t\tCaCertificate: sqlClientCert.ServerCaCert,\n\t\t\t\t},\n\t\t\t\tCloudSqlId: pulumi.String(\"my-database\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsqldbUser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databasemigrationservice.NewConnectionProfile(ctx, \"cloudsqlprofile_destination\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-toprofileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-toprofileid_displayname\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tCloudsql: \u0026databasemigrationservice.ConnectionProfileCloudsqlArgs{\n\t\t\t\tSettings: \u0026databasemigrationservice.ConnectionProfileCloudsqlSettingsArgs{\n\t\t\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_5_7\"),\n\t\t\t\t\tUserLabels: pulumi.StringMap{\n\t\t\t\t\t\t\"cloudfoo\": pulumi.String(\"cloudbar\"),\n\t\t\t\t\t},\n\t\t\t\t\tTier: pulumi.String(\"db-n1-standard-1\"),\n\t\t\t\t\tEdition: pulumi.String(\"ENTERPRISE\"),\n\t\t\t\t\tStorageAutoResizeLimit: pulumi.String(\"0\"),\n\t\t\t\t\tActivationPolicy: pulumi.String(\"ALWAYS\"),\n\t\t\t\t\tIpConfig: \u0026databasemigrationservice.ConnectionProfileCloudsqlSettingsIpConfigArgs{\n\t\t\t\t\t\tEnableIpv4: pulumi.Bool(true),\n\t\t\t\t\t\tRequireSsl: pulumi.Bool(true),\n\t\t\t\t\t},\n\t\t\t\t\tAutoStorageIncrease: pulumi.Bool(true),\n\t\t\t\t\tDataDiskType: pulumi.String(\"PD_HDD\"),\n\t\t\t\t\tDataDiskSizeGb: pulumi.String(\"11\"),\n\t\t\t\t\tZone: pulumi.String(\"us-central1-b\"),\n\t\t\t\t\tSourceId: pulumi.Sprintf(\"projects/%v/locations/us-central1/connectionProfiles/my-fromprofileid\", 
project.ProjectId),\n\t\t\t\t\tRootPassword: pulumi.String(\"testpasscloudsql\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tcloudsqlprofile,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.SslCert;\nimport com.pulumi.gcp.sql.SslCertArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileMysqlArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileMysqlSslArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileCloudsqlArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileCloudsqlSettingsArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileCloudsqlSettingsIpConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var cloudsqldb = new DatabaseInstance(\"cloudsqldb\", DatabaseInstanceArgs.builder()\n .name(\"my-database\")\n .databaseVersion(\"MYSQL_5_7\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-n1-standard-1\")\n .deletionProtectionEnabled(false)\n .build())\n .deletionProtection(false)\n .build());\n\n var sqlClientCert = new SslCert(\"sqlClientCert\", SslCertArgs.builder()\n .commonName(\"my-cert\")\n .instance(cloudsqldb.name())\n .build(), CustomResourceOptions.builder()\n .dependsOn(cloudsqldb)\n .build());\n\n var sqldbUser = new User(\"sqldbUser\", UserArgs.builder()\n .name(\"my-username\")\n .instance(cloudsqldb.name())\n .password(\"my-password\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(sqlClientCert)\n .build());\n\n var cloudsqlprofile = new ConnectionProfile(\"cloudsqlprofile\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"my-fromprofileid\")\n .displayName(\"my-fromprofileid_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .mysql(ConnectionProfileMysqlArgs.builder()\n .host(cloudsqldb.ipAddresses().applyValue(ipAddresses -\u003e ipAddresses[0].ipAddress()))\n .port(3306)\n .username(sqldbUser.name())\n .password(sqldbUser.password())\n .ssl(ConnectionProfileMysqlSslArgs.builder()\n .clientKey(sqlClientCert.privateKey())\n .clientCertificate(sqlClientCert.cert())\n .caCertificate(sqlClientCert.serverCaCert())\n .build())\n .cloudSqlId(\"my-database\")\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(sqldbUser)\n .build());\n\n var cloudsqlprofileDestination = new ConnectionProfile(\"cloudsqlprofileDestination\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n 
.connectionProfileId(\"my-toprofileid\")\n .displayName(\"my-toprofileid_displayname\")\n .labels(Map.of(\"foo\", \"bar\"))\n .cloudsql(ConnectionProfileCloudsqlArgs.builder()\n .settings(ConnectionProfileCloudsqlSettingsArgs.builder()\n .databaseVersion(\"MYSQL_5_7\")\n .userLabels(Map.of(\"cloudfoo\", \"cloudbar\"))\n .tier(\"db-n1-standard-1\")\n .edition(\"ENTERPRISE\")\n .storageAutoResizeLimit(\"0\")\n .activationPolicy(\"ALWAYS\")\n .ipConfig(ConnectionProfileCloudsqlSettingsIpConfigArgs.builder()\n .enableIpv4(true)\n .requireSsl(true)\n .build())\n .autoStorageIncrease(true)\n .dataDiskType(\"PD_HDD\")\n .dataDiskSizeGb(\"11\")\n .zone(\"us-central1-b\")\n .sourceId(String.format(\"projects/%s/locations/us-central1/connectionProfiles/my-fromprofileid\", project.applyValue(getProjectResult -\u003e getProjectResult.projectId())))\n .rootPassword(\"testpasscloudsql\")\n .build())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(cloudsqlprofile)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cloudsqldb:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-database\n databaseVersion: MYSQL_5_7\n settings:\n tier: db-n1-standard-1\n deletionProtectionEnabled: false\n deletionProtection: false\n sqlClientCert:\n type: gcp:sql:SslCert\n name: sql_client_cert\n properties:\n commonName: my-cert\n instance: ${cloudsqldb.name}\n options:\n dependson:\n - ${cloudsqldb}\n sqldbUser:\n type: gcp:sql:User\n name: sqldb_user\n properties:\n name: my-username\n instance: ${cloudsqldb.name}\n password: my-password\n options:\n dependson:\n - ${sqlClientCert}\n cloudsqlprofile:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: my-fromprofileid\n displayName: my-fromprofileid_display\n labels:\n foo: bar\n mysql:\n host: ${cloudsqldb.ipAddresses[0].ipAddress}\n port: 3306\n username: ${sqldbUser.name}\n password: ${sqldbUser.password}\n ssl:\n clientKey: ${sqlClientCert.privateKey}\n clientCertificate: ${sqlClientCert.cert}\n caCertificate: ${sqlClientCert.serverCaCert}\n cloudSqlId: my-database\n options:\n dependson:\n - ${sqldbUser}\n cloudsqlprofileDestination:\n type: gcp:databasemigrationservice:ConnectionProfile\n name: cloudsqlprofile_destination\n properties:\n location: us-central1\n connectionProfileId: my-toprofileid\n displayName: my-toprofileid_displayname\n labels:\n foo: bar\n cloudsql:\n settings:\n databaseVersion: MYSQL_5_7\n userLabels:\n cloudfoo: cloudbar\n tier: db-n1-standard-1\n edition: ENTERPRISE\n storageAutoResizeLimit: '0'\n activationPolicy: ALWAYS\n ipConfig:\n enableIpv4: true\n requireSsl: true\n autoStorageIncrease: true\n dataDiskType: PD_HDD\n dataDiskSizeGb: '11'\n zone: us-central1-b\n sourceId: projects/${project.projectId}/locations/us-central1/connectionProfiles/my-fromprofileid\n rootPassword: testpasscloudsql\n options:\n dependson:\n - ${cloudsqlprofile}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Database Migration Service Connection Profile Postgres\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst postgresqldb = new gcp.sql.DatabaseInstance(\"postgresqldb\", {\n name: \"my-database\",\n databaseVersion: \"POSTGRES_12\",\n settings: {\n tier: \"db-custom-2-13312\",\n },\n deletionProtection: false,\n});\nconst sqlClientCert = new 
gcp.sql.SslCert(\"sql_client_cert\", {\n commonName: \"my-cert\",\n instance: postgresqldb.name,\n}, {\n dependsOn: [postgresqldb],\n});\nconst sqldbUser = new gcp.sql.User(\"sqldb_user\", {\n name: \"my-username\",\n instance: postgresqldb.name,\n password: \"my-password\",\n}, {\n dependsOn: [sqlClientCert],\n});\nconst postgresprofile = new gcp.databasemigrationservice.ConnectionProfile(\"postgresprofile\", {\n location: \"us-central1\",\n connectionProfileId: \"my-profileid\",\n displayName: \"my-profileid_display\",\n labels: {\n foo: \"bar\",\n },\n postgresql: {\n host: postgresqldb.ipAddresses.apply(ipAddresses =\u003e ipAddresses[0].ipAddress),\n port: 5432,\n username: sqldbUser.name,\n password: sqldbUser.password,\n ssl: {\n clientKey: sqlClientCert.privateKey,\n clientCertificate: sqlClientCert.cert,\n caCertificate: sqlClientCert.serverCaCert,\n },\n cloudSqlId: \"my-database\",\n },\n}, {\n dependsOn: [sqldbUser],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npostgresqldb = gcp.sql.DatabaseInstance(\"postgresqldb\",\n name=\"my-database\",\n database_version=\"POSTGRES_12\",\n settings={\n \"tier\": \"db-custom-2-13312\",\n },\n deletion_protection=False)\nsql_client_cert = gcp.sql.SslCert(\"sql_client_cert\",\n common_name=\"my-cert\",\n instance=postgresqldb.name,\n opts = pulumi.ResourceOptions(depends_on=[postgresqldb]))\nsqldb_user = gcp.sql.User(\"sqldb_user\",\n name=\"my-username\",\n instance=postgresqldb.name,\n password=\"my-password\",\n opts = pulumi.ResourceOptions(depends_on=[sql_client_cert]))\npostgresprofile = gcp.databasemigrationservice.ConnectionProfile(\"postgresprofile\",\n location=\"us-central1\",\n connection_profile_id=\"my-profileid\",\n display_name=\"my-profileid_display\",\n labels={\n \"foo\": \"bar\",\n },\n postgresql={\n \"host\": postgresqldb.ip_addresses[0].ip_address,\n \"port\": 5432,\n \"username\": sqldb_user.name,\n \"password\": sqldb_user.password,\n \"ssl\": {\n \"client_key\": sql_client_cert.private_key,\n \"client_certificate\": sql_client_cert.cert,\n \"ca_certificate\": sql_client_cert.server_ca_cert,\n },\n \"cloud_sql_id\": \"my-database\",\n },\n opts = pulumi.ResourceOptions(depends_on=[sqldb_user]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var postgresqldb = new Gcp.Sql.DatabaseInstance(\"postgresqldb\", new()\n {\n Name = \"my-database\",\n DatabaseVersion = \"POSTGRES_12\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-custom-2-13312\",\n },\n DeletionProtection = false,\n });\n\n var sqlClientCert = new Gcp.Sql.SslCert(\"sql_client_cert\", new()\n {\n CommonName = \"my-cert\",\n Instance = postgresqldb.Name,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n postgresqldb,\n },\n });\n\n var sqldbUser = new Gcp.Sql.User(\"sqldb_user\", new()\n {\n Name = \"my-username\",\n Instance = postgresqldb.Name,\n Password = \"my-password\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n sqlClientCert,\n },\n });\n\n var postgresprofile = new Gcp.DatabaseMigrationService.ConnectionProfile(\"postgresprofile\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-profileid\",\n DisplayName = \"my-profileid_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Postgresql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfilePostgresqlArgs\n {\n Host = postgresqldb.IpAddresses.Apply(ipAddresses =\u003e 
ipAddresses[0].IpAddress),\n Port = 5432,\n Username = sqldbUser.Name,\n Password = sqldbUser.Password,\n Ssl = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfilePostgresqlSslArgs\n {\n ClientKey = sqlClientCert.PrivateKey,\n ClientCertificate = sqlClientCert.Cert,\n CaCertificate = sqlClientCert.ServerCaCert,\n },\n CloudSqlId = \"my-database\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n sqldbUser,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpostgresqldb, err := sql.NewDatabaseInstance(ctx, \"postgresqldb\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-database\"),\n\t\t\tDatabaseVersion: pulumi.String(\"POSTGRES_12\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-custom-2-13312\"),\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsqlClientCert, err := sql.NewSslCert(ctx, \"sql_client_cert\", \u0026sql.SslCertArgs{\n\t\t\tCommonName: pulumi.String(\"my-cert\"),\n\t\t\tInstance: postgresqldb.Name,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tpostgresqldb,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsqldbUser, err := sql.NewUser(ctx, \"sqldb_user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"my-username\"),\n\t\t\tInstance: postgresqldb.Name,\n\t\t\tPassword: pulumi.String(\"my-password\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsqlClientCert,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databasemigrationservice.NewConnectionProfile(ctx, \"postgresprofile\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-profileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-profileid_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tPostgresql: \u0026databasemigrationservice.ConnectionProfilePostgresqlArgs{\n\t\t\t\tHost: postgresqldb.IpAddresses.ApplyT(func(ipAddresses []sql.DatabaseInstanceIpAddress) (*string, error) {\n\t\t\t\t\treturn \u0026ipAddresses[0].IpAddress, nil\n\t\t\t\t}).(pulumi.StringPtrOutput),\n\t\t\t\tPort: pulumi.Int(5432),\n\t\t\t\tUsername: sqldbUser.Name,\n\t\t\t\tPassword: sqldbUser.Password,\n\t\t\t\tSsl: \u0026databasemigrationservice.ConnectionProfilePostgresqlSslArgs{\n\t\t\t\t\tClientKey: sqlClientCert.PrivateKey,\n\t\t\t\t\tClientCertificate: sqlClientCert.Cert,\n\t\t\t\t\tCaCertificate: sqlClientCert.ServerCaCert,\n\t\t\t\t},\n\t\t\t\tCloudSqlId: pulumi.String(\"my-database\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsqldbUser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.SslCert;\nimport com.pulumi.gcp.sql.SslCertArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport 
com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfilePostgresqlArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfilePostgresqlSslArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var postgresqldb = new DatabaseInstance(\"postgresqldb\", DatabaseInstanceArgs.builder()\n .name(\"my-database\")\n .databaseVersion(\"POSTGRES_12\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-custom-2-13312\")\n .build())\n .deletionProtection(false)\n .build());\n\n var sqlClientCert = new SslCert(\"sqlClientCert\", SslCertArgs.builder()\n .commonName(\"my-cert\")\n .instance(postgresqldb.name())\n .build(), CustomResourceOptions.builder()\n .dependsOn(postgresqldb)\n .build());\n\n var sqldbUser = new User(\"sqldbUser\", UserArgs.builder()\n .name(\"my-username\")\n .instance(postgresqldb.name())\n .password(\"my-password\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(sqlClientCert)\n .build());\n\n var postgresprofile = new ConnectionProfile(\"postgresprofile\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"my-profileid\")\n .displayName(\"my-profileid_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .postgresql(ConnectionProfilePostgresqlArgs.builder()\n .host(postgresqldb.ipAddresses().applyValue(ipAddresses -\u003e ipAddresses[0].ipAddress()))\n .port(5432)\n .username(sqldbUser.name())\n .password(sqldbUser.password())\n .ssl(ConnectionProfilePostgresqlSslArgs.builder()\n .clientKey(sqlClientCert.privateKey())\n .clientCertificate(sqlClientCert.cert())\n .caCertificate(sqlClientCert.serverCaCert())\n .build())\n .cloudSqlId(\"my-database\")\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(sqldbUser)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n postgresqldb:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-database\n databaseVersion: POSTGRES_12\n settings:\n tier: db-custom-2-13312\n deletionProtection: false\n sqlClientCert:\n type: gcp:sql:SslCert\n name: sql_client_cert\n properties:\n commonName: my-cert\n instance: ${postgresqldb.name}\n options:\n dependson:\n - ${postgresqldb}\n sqldbUser:\n type: gcp:sql:User\n name: sqldb_user\n properties:\n name: my-username\n instance: ${postgresqldb.name}\n password: my-password\n options:\n dependson:\n - ${sqlClientCert}\n postgresprofile:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: my-profileid\n displayName: my-profileid_display\n labels:\n foo: bar\n postgresql:\n host: ${postgresqldb.ipAddresses[0].ipAddress}\n port: 5432\n username: ${sqldbUser.name}\n password: ${sqldbUser.password}\n ssl:\n clientKey: ${sqlClientCert.privateKey}\n clientCertificate: ${sqlClientCert.cert}\n caCertificate: ${sqlClientCert.serverCaCert}\n cloudSqlId: my-database\n options:\n dependson:\n - ${sqldbUser}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Database Migration Service Connection Profile Oracle\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst oracleprofile = new 
gcp.databasemigrationservice.ConnectionProfile(\"oracleprofile\", {\n location: \"us-central1\",\n connectionProfileId: \"my-profileid\",\n displayName: \"my-profileid_display\",\n labels: {\n foo: \"bar\",\n },\n oracle: {\n host: \"host\",\n port: 1521,\n username: \"username\",\n password: \"password\",\n databaseService: \"dbprovider\",\n staticServiceIpConnectivity: {},\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\noracleprofile = gcp.databasemigrationservice.ConnectionProfile(\"oracleprofile\",\n location=\"us-central1\",\n connection_profile_id=\"my-profileid\",\n display_name=\"my-profileid_display\",\n labels={\n \"foo\": \"bar\",\n },\n oracle={\n \"host\": \"host\",\n \"port\": 1521,\n \"username\": \"username\",\n \"password\": \"password\",\n \"database_service\": \"dbprovider\",\n \"static_service_ip_connectivity\": {},\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var oracleprofile = new Gcp.DatabaseMigrationService.ConnectionProfile(\"oracleprofile\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-profileid\",\n DisplayName = \"my-profileid_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Oracle = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileOracleArgs\n {\n Host = \"host\",\n Port = 1521,\n Username = \"username\",\n Password = \"password\",\n DatabaseService = \"dbprovider\",\n StaticServiceIpConnectivity = null,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databasemigrationservice.NewConnectionProfile(ctx, \"oracleprofile\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-profileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-profileid_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tOracle: \u0026databasemigrationservice.ConnectionProfileOracleArgs{\n\t\t\t\tHost: pulumi.String(\"host\"),\n\t\t\t\tPort: pulumi.Int(1521),\n\t\t\t\tUsername: pulumi.String(\"username\"),\n\t\t\t\tPassword: pulumi.String(\"password\"),\n\t\t\t\tDatabaseService: pulumi.String(\"dbprovider\"),\n\t\t\t\tStaticServiceIpConnectivity: nil,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileOracleArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileOracleStaticServiceIpConnectivityArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var oracleprofile = new ConnectionProfile(\"oracleprofile\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"my-profileid\")\n 
.displayName(\"my-profileid_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .oracle(ConnectionProfileOracleArgs.builder()\n .host(\"host\")\n .port(1521)\n .username(\"username\")\n .password(\"password\")\n .databaseService(\"dbprovider\")\n .staticServiceIpConnectivity()\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n oracleprofile:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: my-profileid\n displayName: my-profileid_display\n labels:\n foo: bar\n oracle:\n host: host\n port: 1521\n username: username\n password: password\n databaseService: dbprovider\n staticServiceIpConnectivity: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Database Migration Service Connection Profile Alloydb\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst _default = new gcp.compute.Network(\"default\", {name: \"vpc-network\"});\nconst privateIpAlloc = new gcp.compute.GlobalAddress(\"private_ip_alloc\", {\n name: \"private-ip-alloc\",\n addressType: \"INTERNAL\",\n purpose: \"VPC_PEERING\",\n prefixLength: 16,\n network: _default.id,\n});\nconst vpcConnection = new gcp.servicenetworking.Connection(\"vpc_connection\", {\n network: _default.id,\n service: \"servicenetworking.googleapis.com\",\n reservedPeeringRanges: [privateIpAlloc.name],\n});\nconst alloydbprofile = new gcp.databasemigrationservice.ConnectionProfile(\"alloydbprofile\", {\n location: \"us-central1\",\n connectionProfileId: \"my-profileid\",\n displayName: \"my-profileid_display\",\n labels: {\n foo: \"bar\",\n },\n alloydb: {\n clusterId: \"tf-test-dbmsalloycluster_52865\",\n settings: {\n initialUser: {\n user: \"alloyuser_85840\",\n password: \"alloypass_60302\",\n },\n vpcNetwork: _default.id,\n labels: {\n alloyfoo: \"alloybar\",\n },\n primaryInstanceSettings: {\n id: \"priminstid\",\n machineConfig: {\n cpuCount: 2,\n },\n databaseFlags: {},\n labels: {\n alloysinstfoo: \"allowinstbar\",\n },\n },\n },\n },\n}, {\n dependsOn: [vpcConnection],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\ndefault = gcp.compute.Network(\"default\", name=\"vpc-network\")\nprivate_ip_alloc = gcp.compute.GlobalAddress(\"private_ip_alloc\",\n name=\"private-ip-alloc\",\n address_type=\"INTERNAL\",\n purpose=\"VPC_PEERING\",\n prefix_length=16,\n network=default.id)\nvpc_connection = gcp.servicenetworking.Connection(\"vpc_connection\",\n network=default.id,\n service=\"servicenetworking.googleapis.com\",\n reserved_peering_ranges=[private_ip_alloc.name])\nalloydbprofile = gcp.databasemigrationservice.ConnectionProfile(\"alloydbprofile\",\n location=\"us-central1\",\n connection_profile_id=\"my-profileid\",\n display_name=\"my-profileid_display\",\n labels={\n \"foo\": \"bar\",\n },\n alloydb={\n \"cluster_id\": \"tf-test-dbmsalloycluster_52865\",\n \"settings\": {\n \"initial_user\": {\n \"user\": \"alloyuser_85840\",\n \"password\": \"alloypass_60302\",\n },\n \"vpc_network\": default.id,\n \"labels\": {\n \"alloyfoo\": \"alloybar\",\n },\n \"primary_instance_settings\": {\n \"id\": \"priminstid\",\n \"machine_config\": {\n \"cpu_count\": 2,\n },\n \"database_flags\": {},\n \"labels\": {\n \"alloysinstfoo\": \"allowinstbar\",\n },\n },\n },\n },\n opts = pulumi.ResourceOptions(depends_on=[vpc_connection]))\n```\n```csharp\nusing 
System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var @default = new Gcp.Compute.Network(\"default\", new()\n {\n Name = \"vpc-network\",\n });\n\n var privateIpAlloc = new Gcp.Compute.GlobalAddress(\"private_ip_alloc\", new()\n {\n Name = \"private-ip-alloc\",\n AddressType = \"INTERNAL\",\n Purpose = \"VPC_PEERING\",\n PrefixLength = 16,\n Network = @default.Id,\n });\n\n var vpcConnection = new Gcp.ServiceNetworking.Connection(\"vpc_connection\", new()\n {\n Network = @default.Id,\n Service = \"servicenetworking.googleapis.com\",\n ReservedPeeringRanges = new[]\n {\n privateIpAlloc.Name,\n },\n });\n\n var alloydbprofile = new Gcp.DatabaseMigrationService.ConnectionProfile(\"alloydbprofile\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-profileid\",\n DisplayName = \"my-profileid_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Alloydb = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbArgs\n {\n ClusterId = \"tf-test-dbmsalloycluster_52865\",\n Settings = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbSettingsArgs\n {\n InitialUser = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbSettingsInitialUserArgs\n {\n User = \"alloyuser_85840\",\n Password = \"alloypass_60302\",\n },\n VpcNetwork = @default.Id,\n Labels = \n {\n { \"alloyfoo\", \"alloybar\" },\n },\n PrimaryInstanceSettings = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsArgs\n {\n Id = \"priminstid\",\n MachineConfig = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigArgs\n {\n CpuCount = 2,\n },\n DatabaseFlags = null,\n Labels = \n {\n { \"alloysinstfoo\", \"allowinstbar\" },\n },\n },\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n vpcConnection,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/servicenetworking\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewNetwork(ctx, \"default\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"vpc-network\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprivateIpAlloc, err := compute.NewGlobalAddress(ctx, \"private_ip_alloc\", \u0026compute.GlobalAddressArgs{\n\t\t\tName: pulumi.String(\"private-ip-alloc\"),\n\t\t\tAddressType: pulumi.String(\"INTERNAL\"),\n\t\t\tPurpose: pulumi.String(\"VPC_PEERING\"),\n\t\t\tPrefixLength: pulumi.Int(16),\n\t\t\tNetwork: _default.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvpcConnection, err := servicenetworking.NewConnection(ctx, \"vpc_connection\", \u0026servicenetworking.ConnectionArgs{\n\t\t\tNetwork: _default.ID(),\n\t\t\tService: pulumi.String(\"servicenetworking.googleapis.com\"),\n\t\t\tReservedPeeringRanges: pulumi.StringArray{\n\t\t\t\tprivateIpAlloc.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databasemigrationservice.NewConnectionProfile(ctx, \"alloydbprofile\", 
\u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-profileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-profileid_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tAlloydb: \u0026databasemigrationservice.ConnectionProfileAlloydbArgs{\n\t\t\t\tClusterId: pulumi.String(\"tf-test-dbmsalloycluster_52865\"),\n\t\t\t\tSettings: \u0026databasemigrationservice.ConnectionProfileAlloydbSettingsArgs{\n\t\t\t\t\tInitialUser: \u0026databasemigrationservice.ConnectionProfileAlloydbSettingsInitialUserArgs{\n\t\t\t\t\t\tUser: pulumi.String(\"alloyuser_85840\"),\n\t\t\t\t\t\tPassword: pulumi.String(\"alloypass_60302\"),\n\t\t\t\t\t},\n\t\t\t\t\tVpcNetwork: _default.ID(),\n\t\t\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\t\t\"alloyfoo\": pulumi.String(\"alloybar\"),\n\t\t\t\t\t},\n\t\t\t\t\tPrimaryInstanceSettings: \u0026databasemigrationservice.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsArgs{\n\t\t\t\t\t\tId: pulumi.String(\"priminstid\"),\n\t\t\t\t\t\tMachineConfig: \u0026databasemigrationservice.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigArgs{\n\t\t\t\t\t\t\tCpuCount: pulumi.Int(2),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDatabaseFlags: nil,\n\t\t\t\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\t\t\t\"alloysinstfoo\": pulumi.String(\"allowinstbar\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tvpcConnection,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.GlobalAddress;\nimport com.pulumi.gcp.compute.GlobalAddressArgs;\nimport com.pulumi.gcp.servicenetworking.Connection;\nimport com.pulumi.gcp.servicenetworking.ConnectionArgs;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbSettingsArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbSettingsInitialUserArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var default_ = new Network(\"default\", NetworkArgs.builder()\n .name(\"vpc-network\")\n .build());\n\n var privateIpAlloc = new GlobalAddress(\"privateIpAlloc\", GlobalAddressArgs.builder()\n .name(\"private-ip-alloc\")\n .addressType(\"INTERNAL\")\n .purpose(\"VPC_PEERING\")\n .prefixLength(16)\n 
.network(default_.id())\n .build());\n\n var vpcConnection = new Connection(\"vpcConnection\", ConnectionArgs.builder()\n .network(default_.id())\n .service(\"servicenetworking.googleapis.com\")\n .reservedPeeringRanges(privateIpAlloc.name())\n .build());\n\n var alloydbprofile = new ConnectionProfile(\"alloydbprofile\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"my-profileid\")\n .displayName(\"my-profileid_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .alloydb(ConnectionProfileAlloydbArgs.builder()\n .clusterId(\"tf-test-dbmsalloycluster_52865\")\n .settings(ConnectionProfileAlloydbSettingsArgs.builder()\n .initialUser(ConnectionProfileAlloydbSettingsInitialUserArgs.builder()\n .user(\"alloyuser_85840\")\n .password(\"alloypass_60302\")\n .build())\n .vpcNetwork(default_.id())\n .labels(Map.of(\"alloyfoo\", \"alloybar\"))\n .primaryInstanceSettings(ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsArgs.builder()\n .id(\"priminstid\")\n .machineConfig(ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigArgs.builder()\n .cpuCount(2)\n .build())\n .databaseFlags()\n .labels(Map.of(\"alloysinstfoo\", \"allowinstbar\"))\n .build())\n .build())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(vpcConnection)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:compute:Network\n properties:\n name: vpc-network\n privateIpAlloc:\n type: gcp:compute:GlobalAddress\n name: private_ip_alloc\n properties:\n name: private-ip-alloc\n addressType: INTERNAL\n purpose: VPC_PEERING\n prefixLength: 16\n network: ${default.id}\n vpcConnection:\n type: gcp:servicenetworking:Connection\n name: vpc_connection\n properties:\n network: ${default.id}\n service: servicenetworking.googleapis.com\n reservedPeeringRanges:\n - ${privateIpAlloc.name}\n alloydbprofile:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: my-profileid\n displayName: my-profileid_display\n labels:\n foo: bar\n alloydb:\n clusterId: tf-test-dbmsalloycluster_52865\n settings:\n initialUser:\n user: alloyuser_85840\n password: alloypass_60302\n vpcNetwork: ${default.id}\n labels:\n alloyfoo: alloybar\n primaryInstanceSettings:\n id: priminstid\n machineConfig:\n cpuCount: 2\n databaseFlags: {}\n labels:\n alloysinstfoo: allowinstbar\n options:\n dependson:\n - ${vpcConnection}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nConnectionProfile can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}`\n\n* `{{project}}/{{location}}/{{connection_profile_id}}`\n\n* `{{location}}/{{connection_profile_id}}`\n\nWhen using the `pulumi import` command, ConnectionProfile can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:databasemigrationservice/connectionProfile:ConnectionProfile default projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}\n```\n\n```sh\n$ pulumi import gcp:databasemigrationservice/connectionProfile:ConnectionProfile default {{project}}/{{location}}/{{connection_profile_id}}\n```\n\n```sh\n$ pulumi import gcp:databasemigrationservice/connectionProfile:ConnectionProfile default {{location}}/{{connection_profile_id}}\n```\n\n", + "description": "A connection profile definition.\n\n\nTo get more information about ConnectionProfile, see:\n\n* [API documentation](https://cloud.google.com/database-migration/docs/reference/rest/v1/projects.locations.connectionProfiles/create)\n* How-to Guides\n * [Database Migration](https://cloud.google.com/database-migration/docs/)\n\n\n\n## Example Usage\n\n### Database Migration Service Connection Profile Cloudsql\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst cloudsqldb = new gcp.sql.DatabaseInstance(\"cloudsqldb\", {\n name: \"my-database\",\n databaseVersion: \"MYSQL_5_7\",\n settings: {\n tier: \"db-n1-standard-1\",\n deletionProtectionEnabled: false,\n },\n deletionProtection: false,\n});\nconst sqlClientCert = new gcp.sql.SslCert(\"sql_client_cert\", {\n commonName: \"my-cert\",\n instance: cloudsqldb.name,\n}, {\n dependsOn: [cloudsqldb],\n});\nconst sqldbUser = new gcp.sql.User(\"sqldb_user\", {\n name: \"my-username\",\n instance: cloudsqldb.name,\n password: \"my-password\",\n}, {\n dependsOn: [sqlClientCert],\n});\nconst cloudsqlprofile = new gcp.databasemigrationservice.ConnectionProfile(\"cloudsqlprofile\", {\n location: \"us-central1\",\n connectionProfileId: \"my-fromprofileid\",\n displayName: \"my-fromprofileid_display\",\n labels: {\n foo: \"bar\",\n },\n mysql: {\n host: cloudsqldb.ipAddresses.apply(ipAddresses =\u003e ipAddresses[0].ipAddress),\n port: 3306,\n username: sqldbUser.name,\n password: sqldbUser.password,\n ssl: {\n clientKey: sqlClientCert.privateKey,\n clientCertificate: sqlClientCert.cert,\n caCertificate: sqlClientCert.serverCaCert,\n },\n cloudSqlId: \"my-database\",\n },\n}, {\n dependsOn: [sqldbUser],\n});\nconst cloudsqlprofileDestination = new gcp.databasemigrationservice.ConnectionProfile(\"cloudsqlprofile_destination\", {\n location: \"us-central1\",\n connectionProfileId: \"my-toprofileid\",\n displayName: \"my-toprofileid_displayname\",\n labels: {\n foo: \"bar\",\n },\n cloudsql: {\n settings: {\n databaseVersion: \"MYSQL_5_7\",\n userLabels: {\n cloudfoo: \"cloudbar\",\n },\n tier: \"db-n1-standard-1\",\n edition: \"ENTERPRISE\",\n storageAutoResizeLimit: \"0\",\n activationPolicy: \"ALWAYS\",\n ipConfig: {\n enableIpv4: true,\n requireSsl: true,\n },\n autoStorageIncrease: true,\n dataDiskType: \"PD_HDD\",\n dataDiskSizeGb: \"11\",\n zone: \"us-central1-b\",\n sourceId: project.then(project =\u003e `projects/${project.projectId}/locations/us-central1/connectionProfiles/my-fromprofileid`),\n rootPassword: \"testpasscloudsql\",\n },\n },\n}, {\n dependsOn: [cloudsqlprofile],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\ncloudsqldb = gcp.sql.DatabaseInstance(\"cloudsqldb\",\n name=\"my-database\",\n database_version=\"MYSQL_5_7\",\n settings={\n \"tier\": \"db-n1-standard-1\",\n \"deletion_protection_enabled\": False,\n 
},\n deletion_protection=False)\nsql_client_cert = gcp.sql.SslCert(\"sql_client_cert\",\n common_name=\"my-cert\",\n instance=cloudsqldb.name,\n opts = pulumi.ResourceOptions(depends_on=[cloudsqldb]))\nsqldb_user = gcp.sql.User(\"sqldb_user\",\n name=\"my-username\",\n instance=cloudsqldb.name,\n password=\"my-password\",\n opts = pulumi.ResourceOptions(depends_on=[sql_client_cert]))\ncloudsqlprofile = gcp.databasemigrationservice.ConnectionProfile(\"cloudsqlprofile\",\n location=\"us-central1\",\n connection_profile_id=\"my-fromprofileid\",\n display_name=\"my-fromprofileid_display\",\n labels={\n \"foo\": \"bar\",\n },\n mysql={\n \"host\": cloudsqldb.ip_addresses[0].ip_address,\n \"port\": 3306,\n \"username\": sqldb_user.name,\n \"password\": sqldb_user.password,\n \"ssl\": {\n \"client_key\": sql_client_cert.private_key,\n \"client_certificate\": sql_client_cert.cert,\n \"ca_certificate\": sql_client_cert.server_ca_cert,\n },\n \"cloud_sql_id\": \"my-database\",\n },\n opts = pulumi.ResourceOptions(depends_on=[sqldb_user]))\ncloudsqlprofile_destination = gcp.databasemigrationservice.ConnectionProfile(\"cloudsqlprofile_destination\",\n location=\"us-central1\",\n connection_profile_id=\"my-toprofileid\",\n display_name=\"my-toprofileid_displayname\",\n labels={\n \"foo\": \"bar\",\n },\n cloudsql={\n \"settings\": {\n \"database_version\": \"MYSQL_5_7\",\n \"user_labels\": {\n \"cloudfoo\": \"cloudbar\",\n },\n \"tier\": \"db-n1-standard-1\",\n \"edition\": \"ENTERPRISE\",\n \"storage_auto_resize_limit\": \"0\",\n \"activation_policy\": \"ALWAYS\",\n \"ip_config\": {\n \"enable_ipv4\": True,\n \"require_ssl\": True,\n },\n \"auto_storage_increase\": True,\n \"data_disk_type\": \"PD_HDD\",\n \"data_disk_size_gb\": \"11\",\n \"zone\": \"us-central1-b\",\n \"source_id\": f\"projects/{project.project_id}/locations/us-central1/connectionProfiles/my-fromprofileid\",\n \"root_password\": \"testpasscloudsql\",\n },\n },\n opts = pulumi.ResourceOptions(depends_on=[cloudsqlprofile]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var cloudsqldb = new Gcp.Sql.DatabaseInstance(\"cloudsqldb\", new()\n {\n Name = \"my-database\",\n DatabaseVersion = \"MYSQL_5_7\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-n1-standard-1\",\n DeletionProtectionEnabled = false,\n },\n DeletionProtection = false,\n });\n\n var sqlClientCert = new Gcp.Sql.SslCert(\"sql_client_cert\", new()\n {\n CommonName = \"my-cert\",\n Instance = cloudsqldb.Name,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n cloudsqldb,\n },\n });\n\n var sqldbUser = new Gcp.Sql.User(\"sqldb_user\", new()\n {\n Name = \"my-username\",\n Instance = cloudsqldb.Name,\n Password = \"my-password\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n sqlClientCert,\n },\n });\n\n var cloudsqlprofile = new Gcp.DatabaseMigrationService.ConnectionProfile(\"cloudsqlprofile\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-fromprofileid\",\n DisplayName = \"my-fromprofileid_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Mysql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileMysqlArgs\n {\n Host = cloudsqldb.IpAddresses.Apply(ipAddresses =\u003e ipAddresses[0].IpAddress),\n Port = 3306,\n Username = sqldbUser.Name,\n Password = sqldbUser.Password,\n Ssl = new 
Gcp.DatabaseMigrationService.Inputs.ConnectionProfileMysqlSslArgs\n {\n ClientKey = sqlClientCert.PrivateKey,\n ClientCertificate = sqlClientCert.Cert,\n CaCertificate = sqlClientCert.ServerCaCert,\n },\n CloudSqlId = \"my-database\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n sqldbUser,\n },\n });\n\n var cloudsqlprofileDestination = new Gcp.DatabaseMigrationService.ConnectionProfile(\"cloudsqlprofile_destination\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-toprofileid\",\n DisplayName = \"my-toprofileid_displayname\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Cloudsql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileCloudsqlArgs\n {\n Settings = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileCloudsqlSettingsArgs\n {\n DatabaseVersion = \"MYSQL_5_7\",\n UserLabels = \n {\n { \"cloudfoo\", \"cloudbar\" },\n },\n Tier = \"db-n1-standard-1\",\n Edition = \"ENTERPRISE\",\n StorageAutoResizeLimit = \"0\",\n ActivationPolicy = \"ALWAYS\",\n IpConfig = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileCloudsqlSettingsIpConfigArgs\n {\n EnableIpv4 = true,\n RequireSsl = true,\n },\n AutoStorageIncrease = true,\n DataDiskType = \"PD_HDD\",\n DataDiskSizeGb = \"11\",\n Zone = \"us-central1-b\",\n SourceId = $\"projects/{project.Apply(getProjectResult =\u003e getProjectResult.ProjectId)}/locations/us-central1/connectionProfiles/my-fromprofileid\",\n RootPassword = \"testpasscloudsql\",\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n cloudsqlprofile,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcloudsqldb, err := sql.NewDatabaseInstance(ctx, \"cloudsqldb\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-database\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_5_7\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-n1-standard-1\"),\n\t\t\t\tDeletionProtectionEnabled: pulumi.Bool(false),\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsqlClientCert, err := sql.NewSslCert(ctx, \"sql_client_cert\", \u0026sql.SslCertArgs{\n\t\t\tCommonName: pulumi.String(\"my-cert\"),\n\t\t\tInstance: cloudsqldb.Name,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tcloudsqldb,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsqldbUser, err := sql.NewUser(ctx, \"sqldb_user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"my-username\"),\n\t\t\tInstance: cloudsqldb.Name,\n\t\t\tPassword: pulumi.String(\"my-password\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsqlClientCert,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcloudsqlprofile, err := databasemigrationservice.NewConnectionProfile(ctx, \"cloudsqlprofile\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-fromprofileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-fromprofileid_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": 
pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tMysql: \u0026databasemigrationservice.ConnectionProfileMysqlArgs{\n\t\t\t\tHost: cloudsqldb.IpAddresses.ApplyT(func(ipAddresses []sql.DatabaseInstanceIpAddress) (*string, error) {\n\t\t\t\t\treturn \u0026ipAddresses[0].IpAddress, nil\n\t\t\t\t}).(pulumi.StringPtrOutput),\n\t\t\t\tPort: pulumi.Int(3306),\n\t\t\t\tUsername: sqldbUser.Name,\n\t\t\t\tPassword: sqldbUser.Password,\n\t\t\t\tSsl: \u0026databasemigrationservice.ConnectionProfileMysqlSslArgs{\n\t\t\t\t\tClientKey: sqlClientCert.PrivateKey,\n\t\t\t\t\tClientCertificate: sqlClientCert.Cert,\n\t\t\t\t\tCaCertificate: sqlClientCert.ServerCaCert,\n\t\t\t\t},\n\t\t\t\tCloudSqlId: pulumi.String(\"my-database\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsqldbUser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databasemigrationservice.NewConnectionProfile(ctx, \"cloudsqlprofile_destination\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-toprofileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-toprofileid_displayname\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tCloudsql: \u0026databasemigrationservice.ConnectionProfileCloudsqlArgs{\n\t\t\t\tSettings: \u0026databasemigrationservice.ConnectionProfileCloudsqlSettingsArgs{\n\t\t\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_5_7\"),\n\t\t\t\t\tUserLabels: pulumi.StringMap{\n\t\t\t\t\t\t\"cloudfoo\": pulumi.String(\"cloudbar\"),\n\t\t\t\t\t},\n\t\t\t\t\tTier: pulumi.String(\"db-n1-standard-1\"),\n\t\t\t\t\tEdition: pulumi.String(\"ENTERPRISE\"),\n\t\t\t\t\tStorageAutoResizeLimit: pulumi.String(\"0\"),\n\t\t\t\t\tActivationPolicy: pulumi.String(\"ALWAYS\"),\n\t\t\t\t\tIpConfig: \u0026databasemigrationservice.ConnectionProfileCloudsqlSettingsIpConfigArgs{\n\t\t\t\t\t\tEnableIpv4: pulumi.Bool(true),\n\t\t\t\t\t\tRequireSsl: pulumi.Bool(true),\n\t\t\t\t\t},\n\t\t\t\t\tAutoStorageIncrease: pulumi.Bool(true),\n\t\t\t\t\tDataDiskType: pulumi.String(\"PD_HDD\"),\n\t\t\t\t\tDataDiskSizeGb: pulumi.String(\"11\"),\n\t\t\t\t\tZone: pulumi.String(\"us-central1-b\"),\n\t\t\t\t\tSourceId: pulumi.Sprintf(\"projects/%v/locations/us-central1/connectionProfiles/my-fromprofileid\", project.ProjectId),\n\t\t\t\t\tRootPassword: pulumi.String(\"testpasscloudsql\"),\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tcloudsqlprofile,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.SslCert;\nimport com.pulumi.gcp.sql.SslCertArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileMysqlArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileMysqlSslArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileCloudsqlArgs;\nimport 
com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileCloudsqlSettingsArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileCloudsqlSettingsIpConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var cloudsqldb = new DatabaseInstance(\"cloudsqldb\", DatabaseInstanceArgs.builder()\n .name(\"my-database\")\n .databaseVersion(\"MYSQL_5_7\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-n1-standard-1\")\n .deletionProtectionEnabled(false)\n .build())\n .deletionProtection(false)\n .build());\n\n var sqlClientCert = new SslCert(\"sqlClientCert\", SslCertArgs.builder()\n .commonName(\"my-cert\")\n .instance(cloudsqldb.name())\n .build(), CustomResourceOptions.builder()\n .dependsOn(cloudsqldb)\n .build());\n\n var sqldbUser = new User(\"sqldbUser\", UserArgs.builder()\n .name(\"my-username\")\n .instance(cloudsqldb.name())\n .password(\"my-password\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(sqlClientCert)\n .build());\n\n var cloudsqlprofile = new ConnectionProfile(\"cloudsqlprofile\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"my-fromprofileid\")\n .displayName(\"my-fromprofileid_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .mysql(ConnectionProfileMysqlArgs.builder()\n .host(cloudsqldb.ipAddresses().applyValue(ipAddresses -\u003e ipAddresses[0].ipAddress()))\n .port(3306)\n .username(sqldbUser.name())\n .password(sqldbUser.password())\n .ssl(ConnectionProfileMysqlSslArgs.builder()\n .clientKey(sqlClientCert.privateKey())\n .clientCertificate(sqlClientCert.cert())\n .caCertificate(sqlClientCert.serverCaCert())\n .build())\n .cloudSqlId(\"my-database\")\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(sqldbUser)\n .build());\n\n var cloudsqlprofileDestination = new ConnectionProfile(\"cloudsqlprofileDestination\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"my-toprofileid\")\n .displayName(\"my-toprofileid_displayname\")\n .labels(Map.of(\"foo\", \"bar\"))\n .cloudsql(ConnectionProfileCloudsqlArgs.builder()\n .settings(ConnectionProfileCloudsqlSettingsArgs.builder()\n .databaseVersion(\"MYSQL_5_7\")\n .userLabels(Map.of(\"cloudfoo\", \"cloudbar\"))\n .tier(\"db-n1-standard-1\")\n .edition(\"ENTERPRISE\")\n .storageAutoResizeLimit(\"0\")\n .activationPolicy(\"ALWAYS\")\n .ipConfig(ConnectionProfileCloudsqlSettingsIpConfigArgs.builder()\n .enableIpv4(true)\n .requireSsl(true)\n .build())\n .autoStorageIncrease(true)\n .dataDiskType(\"PD_HDD\")\n .dataDiskSizeGb(\"11\")\n .zone(\"us-central1-b\")\n .sourceId(String.format(\"projects/%s/locations/us-central1/connectionProfiles/my-fromprofileid\", project.applyValue(getProjectResult -\u003e getProjectResult.projectId())))\n .rootPassword(\"testpasscloudsql\")\n .build())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(cloudsqlprofile)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cloudsqldb:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-database\n databaseVersion: MYSQL_5_7\n settings:\n tier: db-n1-standard-1\n deletionProtectionEnabled: false\n deletionProtection: false\n 
sqlClientCert:\n type: gcp:sql:SslCert\n name: sql_client_cert\n properties:\n commonName: my-cert\n instance: ${cloudsqldb.name}\n options:\n dependson:\n - ${cloudsqldb}\n sqldbUser:\n type: gcp:sql:User\n name: sqldb_user\n properties:\n name: my-username\n instance: ${cloudsqldb.name}\n password: my-password\n options:\n dependson:\n - ${sqlClientCert}\n cloudsqlprofile:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: my-fromprofileid\n displayName: my-fromprofileid_display\n labels:\n foo: bar\n mysql:\n host: ${cloudsqldb.ipAddresses[0].ipAddress}\n port: 3306\n username: ${sqldbUser.name}\n password: ${sqldbUser.password}\n ssl:\n clientKey: ${sqlClientCert.privateKey}\n clientCertificate: ${sqlClientCert.cert}\n caCertificate: ${sqlClientCert.serverCaCert}\n cloudSqlId: my-database\n options:\n dependson:\n - ${sqldbUser}\n cloudsqlprofileDestination:\n type: gcp:databasemigrationservice:ConnectionProfile\n name: cloudsqlprofile_destination\n properties:\n location: us-central1\n connectionProfileId: my-toprofileid\n displayName: my-toprofileid_displayname\n labels:\n foo: bar\n cloudsql:\n settings:\n databaseVersion: MYSQL_5_7\n userLabels:\n cloudfoo: cloudbar\n tier: db-n1-standard-1\n edition: ENTERPRISE\n storageAutoResizeLimit: '0'\n activationPolicy: ALWAYS\n ipConfig:\n enableIpv4: true\n requireSsl: true\n autoStorageIncrease: true\n dataDiskType: PD_HDD\n dataDiskSizeGb: '11'\n zone: us-central1-b\n sourceId: projects/${project.projectId}/locations/us-central1/connectionProfiles/my-fromprofileid\n rootPassword: testpasscloudsql\n options:\n dependson:\n - ${cloudsqlprofile}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Database Migration Service Connection Profile Postgres\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst postgresqldb = new gcp.sql.DatabaseInstance(\"postgresqldb\", {\n name: \"my-database\",\n databaseVersion: \"POSTGRES_12\",\n settings: {\n tier: \"db-custom-2-13312\",\n },\n deletionProtection: false,\n});\nconst sqlClientCert = new gcp.sql.SslCert(\"sql_client_cert\", {\n commonName: \"my-cert\",\n instance: postgresqldb.name,\n}, {\n dependsOn: [postgresqldb],\n});\nconst sqldbUser = new gcp.sql.User(\"sqldb_user\", {\n name: \"my-username\",\n instance: postgresqldb.name,\n password: \"my-password\",\n}, {\n dependsOn: [sqlClientCert],\n});\nconst postgresprofile = new gcp.databasemigrationservice.ConnectionProfile(\"postgresprofile\", {\n location: \"us-central1\",\n connectionProfileId: \"my-profileid\",\n displayName: \"my-profileid_display\",\n labels: {\n foo: \"bar\",\n },\n postgresql: {\n host: postgresqldb.ipAddresses.apply(ipAddresses =\u003e ipAddresses[0].ipAddress),\n port: 5432,\n username: sqldbUser.name,\n password: sqldbUser.password,\n ssl: {\n clientKey: sqlClientCert.privateKey,\n clientCertificate: sqlClientCert.cert,\n caCertificate: sqlClientCert.serverCaCert,\n },\n cloudSqlId: \"my-database\",\n },\n}, {\n dependsOn: [sqldbUser],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npostgresqldb = gcp.sql.DatabaseInstance(\"postgresqldb\",\n name=\"my-database\",\n database_version=\"POSTGRES_12\",\n settings={\n \"tier\": \"db-custom-2-13312\",\n },\n deletion_protection=False)\nsql_client_cert = gcp.sql.SslCert(\"sql_client_cert\",\n 
common_name=\"my-cert\",\n instance=postgresqldb.name,\n opts = pulumi.ResourceOptions(depends_on=[postgresqldb]))\nsqldb_user = gcp.sql.User(\"sqldb_user\",\n name=\"my-username\",\n instance=postgresqldb.name,\n password=\"my-password\",\n opts = pulumi.ResourceOptions(depends_on=[sql_client_cert]))\npostgresprofile = gcp.databasemigrationservice.ConnectionProfile(\"postgresprofile\",\n location=\"us-central1\",\n connection_profile_id=\"my-profileid\",\n display_name=\"my-profileid_display\",\n labels={\n \"foo\": \"bar\",\n },\n postgresql={\n \"host\": postgresqldb.ip_addresses[0].ip_address,\n \"port\": 5432,\n \"username\": sqldb_user.name,\n \"password\": sqldb_user.password,\n \"ssl\": {\n \"client_key\": sql_client_cert.private_key,\n \"client_certificate\": sql_client_cert.cert,\n \"ca_certificate\": sql_client_cert.server_ca_cert,\n },\n \"cloud_sql_id\": \"my-database\",\n },\n opts = pulumi.ResourceOptions(depends_on=[sqldb_user]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var postgresqldb = new Gcp.Sql.DatabaseInstance(\"postgresqldb\", new()\n {\n Name = \"my-database\",\n DatabaseVersion = \"POSTGRES_12\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-custom-2-13312\",\n },\n DeletionProtection = false,\n });\n\n var sqlClientCert = new Gcp.Sql.SslCert(\"sql_client_cert\", new()\n {\n CommonName = \"my-cert\",\n Instance = postgresqldb.Name,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n postgresqldb,\n },\n });\n\n var sqldbUser = new Gcp.Sql.User(\"sqldb_user\", new()\n {\n Name = \"my-username\",\n Instance = postgresqldb.Name,\n Password = \"my-password\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n sqlClientCert,\n },\n });\n\n var postgresprofile = new Gcp.DatabaseMigrationService.ConnectionProfile(\"postgresprofile\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-profileid\",\n DisplayName = \"my-profileid_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Postgresql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfilePostgresqlArgs\n {\n Host = postgresqldb.IpAddresses.Apply(ipAddresses =\u003e ipAddresses[0].IpAddress),\n Port = 5432,\n Username = sqldbUser.Name,\n Password = sqldbUser.Password,\n Ssl = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfilePostgresqlSslArgs\n {\n ClientKey = sqlClientCert.PrivateKey,\n ClientCertificate = sqlClientCert.Cert,\n CaCertificate = sqlClientCert.ServerCaCert,\n },\n CloudSqlId = \"my-database\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n sqldbUser,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpostgresqldb, err := sql.NewDatabaseInstance(ctx, \"postgresqldb\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-database\"),\n\t\t\tDatabaseVersion: pulumi.String(\"POSTGRES_12\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-custom-2-13312\"),\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsqlClientCert, err := sql.NewSslCert(ctx, \"sql_client_cert\", \u0026sql.SslCertArgs{\n\t\t\tCommonName: 
pulumi.String(\"my-cert\"),\n\t\t\tInstance: postgresqldb.Name,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tpostgresqldb,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsqldbUser, err := sql.NewUser(ctx, \"sqldb_user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"my-username\"),\n\t\t\tInstance: postgresqldb.Name,\n\t\t\tPassword: pulumi.String(\"my-password\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsqlClientCert,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databasemigrationservice.NewConnectionProfile(ctx, \"postgresprofile\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-profileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-profileid_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tPostgresql: \u0026databasemigrationservice.ConnectionProfilePostgresqlArgs{\n\t\t\t\tHost: postgresqldb.IpAddresses.ApplyT(func(ipAddresses []sql.DatabaseInstanceIpAddress) (*string, error) {\n\t\t\t\t\treturn \u0026ipAddresses[0].IpAddress, nil\n\t\t\t\t}).(pulumi.StringPtrOutput),\n\t\t\t\tPort: pulumi.Int(5432),\n\t\t\t\tUsername: sqldbUser.Name,\n\t\t\t\tPassword: sqldbUser.Password,\n\t\t\t\tSsl: \u0026databasemigrationservice.ConnectionProfilePostgresqlSslArgs{\n\t\t\t\t\tClientKey: sqlClientCert.PrivateKey,\n\t\t\t\t\tClientCertificate: sqlClientCert.Cert,\n\t\t\t\t\tCaCertificate: sqlClientCert.ServerCaCert,\n\t\t\t\t},\n\t\t\t\tCloudSqlId: pulumi.String(\"my-database\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tsqldbUser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.SslCert;\nimport com.pulumi.gcp.sql.SslCertArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfilePostgresqlArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfilePostgresqlSslArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var postgresqldb = new DatabaseInstance(\"postgresqldb\", DatabaseInstanceArgs.builder()\n .name(\"my-database\")\n .databaseVersion(\"POSTGRES_12\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-custom-2-13312\")\n .build())\n .deletionProtection(false)\n .build());\n\n var sqlClientCert = new SslCert(\"sqlClientCert\", SslCertArgs.builder()\n .commonName(\"my-cert\")\n .instance(postgresqldb.name())\n .build(), CustomResourceOptions.builder()\n .dependsOn(postgresqldb)\n .build());\n\n var sqldbUser = new User(\"sqldbUser\", UserArgs.builder()\n .name(\"my-username\")\n .instance(postgresqldb.name())\n .password(\"my-password\")\n .build(), 
CustomResourceOptions.builder()\n .dependsOn(sqlClientCert)\n .build());\n\n var postgresprofile = new ConnectionProfile(\"postgresprofile\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"my-profileid\")\n .displayName(\"my-profileid_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .postgresql(ConnectionProfilePostgresqlArgs.builder()\n .host(postgresqldb.ipAddresses().applyValue(ipAddresses -\u003e ipAddresses[0].ipAddress()))\n .port(5432)\n .username(sqldbUser.name())\n .password(sqldbUser.password())\n .ssl(ConnectionProfilePostgresqlSslArgs.builder()\n .clientKey(sqlClientCert.privateKey())\n .clientCertificate(sqlClientCert.cert())\n .caCertificate(sqlClientCert.serverCaCert())\n .build())\n .cloudSqlId(\"my-database\")\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(sqldbUser)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n postgresqldb:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-database\n databaseVersion: POSTGRES_12\n settings:\n tier: db-custom-2-13312\n deletionProtection: false\n sqlClientCert:\n type: gcp:sql:SslCert\n name: sql_client_cert\n properties:\n commonName: my-cert\n instance: ${postgresqldb.name}\n options:\n dependson:\n - ${postgresqldb}\n sqldbUser:\n type: gcp:sql:User\n name: sqldb_user\n properties:\n name: my-username\n instance: ${postgresqldb.name}\n password: my-password\n options:\n dependson:\n - ${sqlClientCert}\n postgresprofile:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: my-profileid\n displayName: my-profileid_display\n labels:\n foo: bar\n postgresql:\n host: ${postgresqldb.ipAddresses[0].ipAddress}\n port: 5432\n username: ${sqldbUser.name}\n password: ${sqldbUser.password}\n ssl:\n clientKey: ${sqlClientCert.privateKey}\n clientCertificate: ${sqlClientCert.cert}\n caCertificate: ${sqlClientCert.serverCaCert}\n cloudSqlId: my-database\n options:\n dependson:\n - ${sqldbUser}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Database Migration Service Connection Profile Oracle\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst oracleprofile = new gcp.databasemigrationservice.ConnectionProfile(\"oracleprofile\", {\n location: \"us-central1\",\n connectionProfileId: \"my-profileid\",\n displayName: \"my-profileid_display\",\n labels: {\n foo: \"bar\",\n },\n oracle: {\n host: \"host\",\n port: 1521,\n username: \"username\",\n password: \"password\",\n databaseService: \"dbprovider\",\n staticServiceIpConnectivity: {},\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\noracleprofile = gcp.databasemigrationservice.ConnectionProfile(\"oracleprofile\",\n location=\"us-central1\",\n connection_profile_id=\"my-profileid\",\n display_name=\"my-profileid_display\",\n labels={\n \"foo\": \"bar\",\n },\n oracle={\n \"host\": \"host\",\n \"port\": 1521,\n \"username\": \"username\",\n \"password\": \"password\",\n \"database_service\": \"dbprovider\",\n \"static_service_ip_connectivity\": {},\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var oracleprofile = new Gcp.DatabaseMigrationService.ConnectionProfile(\"oracleprofile\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-profileid\",\n DisplayName = \"my-profileid_display\",\n Labels = \n {\n { 
\"foo\", \"bar\" },\n },\n Oracle = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileOracleArgs\n {\n Host = \"host\",\n Port = 1521,\n Username = \"username\",\n Password = \"password\",\n DatabaseService = \"dbprovider\",\n StaticServiceIpConnectivity = null,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := databasemigrationservice.NewConnectionProfile(ctx, \"oracleprofile\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-profileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-profileid_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tOracle: \u0026databasemigrationservice.ConnectionProfileOracleArgs{\n\t\t\t\tHost: pulumi.String(\"host\"),\n\t\t\t\tPort: pulumi.Int(1521),\n\t\t\t\tUsername: pulumi.String(\"username\"),\n\t\t\t\tPassword: pulumi.String(\"password\"),\n\t\t\t\tDatabaseService: pulumi.String(\"dbprovider\"),\n\t\t\t\tStaticServiceIpConnectivity: nil,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileOracleArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileOracleStaticServiceIpConnectivityArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var oracleprofile = new ConnectionProfile(\"oracleprofile\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"my-profileid\")\n .displayName(\"my-profileid_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .oracle(ConnectionProfileOracleArgs.builder()\n .host(\"host\")\n .port(1521)\n .username(\"username\")\n .password(\"password\")\n .databaseService(\"dbprovider\")\n .staticServiceIpConnectivity()\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n oracleprofile:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: my-profileid\n displayName: my-profileid_display\n labels:\n foo: bar\n oracle:\n host: host\n port: 1521\n username: username\n password: password\n databaseService: dbprovider\n staticServiceIpConnectivity: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Database Migration Service Connection Profile Alloydb\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst _default = new gcp.compute.Network(\"default\", {name: \"vpc-network\"});\nconst privateIpAlloc = new gcp.compute.GlobalAddress(\"private_ip_alloc\", {\n name: \"private-ip-alloc\",\n addressType: \"INTERNAL\",\n purpose: \"VPC_PEERING\",\n prefixLength: 16,\n network: 
_default.id,\n});\nconst vpcConnection = new gcp.servicenetworking.Connection(\"vpc_connection\", {\n network: _default.id,\n service: \"servicenetworking.googleapis.com\",\n reservedPeeringRanges: [privateIpAlloc.name],\n});\nconst alloydbprofile = new gcp.databasemigrationservice.ConnectionProfile(\"alloydbprofile\", {\n location: \"us-central1\",\n connectionProfileId: \"my-profileid\",\n displayName: \"my-profileid_display\",\n labels: {\n foo: \"bar\",\n },\n alloydb: {\n clusterId: \"tf-test-dbmsalloycluster_52865\",\n settings: {\n initialUser: {\n user: \"alloyuser_85840\",\n password: \"alloypass_60302\",\n },\n vpcNetwork: _default.id,\n labels: {\n alloyfoo: \"alloybar\",\n },\n primaryInstanceSettings: {\n id: \"priminstid\",\n machineConfig: {\n cpuCount: 2,\n },\n databaseFlags: {},\n labels: {\n alloysinstfoo: \"allowinstbar\",\n },\n },\n },\n },\n}, {\n dependsOn: [vpcConnection],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\ndefault = gcp.compute.Network(\"default\", name=\"vpc-network\")\nprivate_ip_alloc = gcp.compute.GlobalAddress(\"private_ip_alloc\",\n name=\"private-ip-alloc\",\n address_type=\"INTERNAL\",\n purpose=\"VPC_PEERING\",\n prefix_length=16,\n network=default.id)\nvpc_connection = gcp.servicenetworking.Connection(\"vpc_connection\",\n network=default.id,\n service=\"servicenetworking.googleapis.com\",\n reserved_peering_ranges=[private_ip_alloc.name])\nalloydbprofile = gcp.databasemigrationservice.ConnectionProfile(\"alloydbprofile\",\n location=\"us-central1\",\n connection_profile_id=\"my-profileid\",\n display_name=\"my-profileid_display\",\n labels={\n \"foo\": \"bar\",\n },\n alloydb={\n \"cluster_id\": \"tf-test-dbmsalloycluster_52865\",\n \"settings\": {\n \"initial_user\": {\n \"user\": \"alloyuser_85840\",\n \"password\": \"alloypass_60302\",\n },\n \"vpc_network\": default.id,\n \"labels\": {\n \"alloyfoo\": \"alloybar\",\n },\n \"primary_instance_settings\": {\n \"id\": \"priminstid\",\n \"machine_config\": {\n \"cpu_count\": 2,\n },\n \"database_flags\": {},\n \"labels\": {\n \"alloysinstfoo\": \"allowinstbar\",\n },\n },\n },\n },\n opts = pulumi.ResourceOptions(depends_on=[vpc_connection]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var @default = new Gcp.Compute.Network(\"default\", new()\n {\n Name = \"vpc-network\",\n });\n\n var privateIpAlloc = new Gcp.Compute.GlobalAddress(\"private_ip_alloc\", new()\n {\n Name = \"private-ip-alloc\",\n AddressType = \"INTERNAL\",\n Purpose = \"VPC_PEERING\",\n PrefixLength = 16,\n Network = @default.Id,\n });\n\n var vpcConnection = new Gcp.ServiceNetworking.Connection(\"vpc_connection\", new()\n {\n Network = @default.Id,\n Service = \"servicenetworking.googleapis.com\",\n ReservedPeeringRanges = new[]\n {\n privateIpAlloc.Name,\n },\n });\n\n var alloydbprofile = new Gcp.DatabaseMigrationService.ConnectionProfile(\"alloydbprofile\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"my-profileid\",\n DisplayName = \"my-profileid_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Alloydb = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbArgs\n {\n ClusterId = \"tf-test-dbmsalloycluster_52865\",\n Settings = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbSettingsArgs\n {\n InitialUser = new 
Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbSettingsInitialUserArgs\n {\n User = \"alloyuser_85840\",\n Password = \"alloypass_60302\",\n },\n VpcNetwork = @default.Id,\n Labels = \n {\n { \"alloyfoo\", \"alloybar\" },\n },\n PrimaryInstanceSettings = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsArgs\n {\n Id = \"priminstid\",\n MachineConfig = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigArgs\n {\n CpuCount = 2,\n },\n DatabaseFlags = null,\n Labels = \n {\n { \"alloysinstfoo\", \"allowinstbar\" },\n },\n },\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n vpcConnection,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/servicenetworking\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewNetwork(ctx, \"default\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"vpc-network\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprivateIpAlloc, err := compute.NewGlobalAddress(ctx, \"private_ip_alloc\", \u0026compute.GlobalAddressArgs{\n\t\t\tName: pulumi.String(\"private-ip-alloc\"),\n\t\t\tAddressType: pulumi.String(\"INTERNAL\"),\n\t\t\tPurpose: pulumi.String(\"VPC_PEERING\"),\n\t\t\tPrefixLength: pulumi.Int(16),\n\t\t\tNetwork: _default.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvpcConnection, err := servicenetworking.NewConnection(ctx, \"vpc_connection\", \u0026servicenetworking.ConnectionArgs{\n\t\t\tNetwork: _default.ID(),\n\t\t\tService: pulumi.String(\"servicenetworking.googleapis.com\"),\n\t\t\tReservedPeeringRanges: pulumi.StringArray{\n\t\t\t\tprivateIpAlloc.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databasemigrationservice.NewConnectionProfile(ctx, \"alloydbprofile\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"my-profileid\"),\n\t\t\tDisplayName: pulumi.String(\"my-profileid_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tAlloydb: \u0026databasemigrationservice.ConnectionProfileAlloydbArgs{\n\t\t\t\tClusterId: pulumi.String(\"tf-test-dbmsalloycluster_52865\"),\n\t\t\t\tSettings: \u0026databasemigrationservice.ConnectionProfileAlloydbSettingsArgs{\n\t\t\t\t\tInitialUser: \u0026databasemigrationservice.ConnectionProfileAlloydbSettingsInitialUserArgs{\n\t\t\t\t\t\tUser: pulumi.String(\"alloyuser_85840\"),\n\t\t\t\t\t\tPassword: pulumi.String(\"alloypass_60302\"),\n\t\t\t\t\t},\n\t\t\t\t\tVpcNetwork: _default.ID(),\n\t\t\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\t\t\"alloyfoo\": pulumi.String(\"alloybar\"),\n\t\t\t\t\t},\n\t\t\t\t\tPrimaryInstanceSettings: \u0026databasemigrationservice.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsArgs{\n\t\t\t\t\t\tId: pulumi.String(\"priminstid\"),\n\t\t\t\t\t\tMachineConfig: \u0026databasemigrationservice.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigArgs{\n\t\t\t\t\t\t\tCpuCount: 
pulumi.Int(2),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tDatabaseFlags: nil,\n\t\t\t\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\t\t\t\"alloysinstfoo\": pulumi.String(\"allowinstbar\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tvpcConnection,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.GlobalAddress;\nimport com.pulumi.gcp.compute.GlobalAddressArgs;\nimport com.pulumi.gcp.servicenetworking.Connection;\nimport com.pulumi.gcp.servicenetworking.ConnectionArgs;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbSettingsArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbSettingsInitialUserArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var default_ = new Network(\"default\", NetworkArgs.builder()\n .name(\"vpc-network\")\n .build());\n\n var privateIpAlloc = new GlobalAddress(\"privateIpAlloc\", GlobalAddressArgs.builder()\n .name(\"private-ip-alloc\")\n .addressType(\"INTERNAL\")\n .purpose(\"VPC_PEERING\")\n .prefixLength(16)\n .network(default_.id())\n .build());\n\n var vpcConnection = new Connection(\"vpcConnection\", ConnectionArgs.builder()\n .network(default_.id())\n .service(\"servicenetworking.googleapis.com\")\n .reservedPeeringRanges(privateIpAlloc.name())\n .build());\n\n var alloydbprofile = new ConnectionProfile(\"alloydbprofile\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"my-profileid\")\n .displayName(\"my-profileid_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .alloydb(ConnectionProfileAlloydbArgs.builder()\n .clusterId(\"tf-test-dbmsalloycluster_52865\")\n .settings(ConnectionProfileAlloydbSettingsArgs.builder()\n .initialUser(ConnectionProfileAlloydbSettingsInitialUserArgs.builder()\n .user(\"alloyuser_85840\")\n .password(\"alloypass_60302\")\n .build())\n .vpcNetwork(default_.id())\n .labels(Map.of(\"alloyfoo\", \"alloybar\"))\n .primaryInstanceSettings(ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsArgs.builder()\n .id(\"priminstid\")\n .machineConfig(ConnectionProfileAlloydbSettingsPrimaryInstanceSettingsMachineConfigArgs.builder()\n .cpuCount(2)\n .build())\n .databaseFlags()\n .labels(Map.of(\"alloysinstfoo\", \"allowinstbar\"))\n .build())\n .build())\n .build())\n .build(), 
CustomResourceOptions.builder()\n .dependsOn(vpcConnection)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:compute:Network\n properties:\n name: vpc-network\n privateIpAlloc:\n type: gcp:compute:GlobalAddress\n name: private_ip_alloc\n properties:\n name: private-ip-alloc\n addressType: INTERNAL\n purpose: VPC_PEERING\n prefixLength: 16\n network: ${default.id}\n vpcConnection:\n type: gcp:servicenetworking:Connection\n name: vpc_connection\n properties:\n network: ${default.id}\n service: servicenetworking.googleapis.com\n reservedPeeringRanges:\n - ${privateIpAlloc.name}\n alloydbprofile:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: my-profileid\n displayName: my-profileid_display\n labels:\n foo: bar\n alloydb:\n clusterId: tf-test-dbmsalloycluster_52865\n settings:\n initialUser:\n user: alloyuser_85840\n password: alloypass_60302\n vpcNetwork: ${default.id}\n labels:\n alloyfoo: alloybar\n primaryInstanceSettings:\n id: priminstid\n machineConfig:\n cpuCount: 2\n databaseFlags: {}\n labels:\n alloysinstfoo: allowinstbar\n options:\n dependson:\n - ${vpcConnection}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Database Migration Service Connection Profile Existing Mysql\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst destinationCsql = new gcp.sql.DatabaseInstance(\"destination_csql\", {\n name: \"destination-csql\",\n databaseVersion: \"MYSQL_5_7\",\n settings: {\n tier: \"db-n1-standard-1\",\n deletionProtectionEnabled: false,\n },\n deletionProtection: false,\n});\nconst existing_mysql = new gcp.databasemigrationservice.ConnectionProfile(\"existing-mysql\", {\n location: \"us-central1\",\n connectionProfileId: \"destination-cp\",\n displayName: \"destination-cp_display\",\n labels: {\n foo: \"bar\",\n },\n mysql: {\n cloudSqlId: \"destination-csql\",\n },\n}, {\n dependsOn: [destinationCsql],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\ndestination_csql = gcp.sql.DatabaseInstance(\"destination_csql\",\n name=\"destination-csql\",\n database_version=\"MYSQL_5_7\",\n settings={\n \"tier\": \"db-n1-standard-1\",\n \"deletion_protection_enabled\": False,\n },\n deletion_protection=False)\nexisting_mysql = gcp.databasemigrationservice.ConnectionProfile(\"existing-mysql\",\n location=\"us-central1\",\n connection_profile_id=\"destination-cp\",\n display_name=\"destination-cp_display\",\n labels={\n \"foo\": \"bar\",\n },\n mysql={\n \"cloud_sql_id\": \"destination-csql\",\n },\n opts = pulumi.ResourceOptions(depends_on=[destination_csql]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var destinationCsql = new Gcp.Sql.DatabaseInstance(\"destination_csql\", new()\n {\n Name = \"destination-csql\",\n DatabaseVersion = \"MYSQL_5_7\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-n1-standard-1\",\n DeletionProtectionEnabled = false,\n },\n DeletionProtection = false,\n });\n\n var existing_mysql = new Gcp.DatabaseMigrationService.ConnectionProfile(\"existing-mysql\", new()\n {\n 
Location = \"us-central1\",\n ConnectionProfileId = \"destination-cp\",\n DisplayName = \"destination-cp_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Mysql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileMysqlArgs\n {\n CloudSqlId = \"destination-csql\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n destinationCsql,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationCsql, err := sql.NewDatabaseInstance(ctx, \"destination_csql\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"destination-csql\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_5_7\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-n1-standard-1\"),\n\t\t\t\tDeletionProtectionEnabled: pulumi.Bool(false),\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databasemigrationservice.NewConnectionProfile(ctx, \"existing-mysql\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-cp\"),\n\t\t\tDisplayName: pulumi.String(\"destination-cp_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tMysql: \u0026databasemigrationservice.ConnectionProfileMysqlArgs{\n\t\t\t\tCloudSqlId: pulumi.String(\"destination-csql\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tdestinationCsql,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileMysqlArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var destinationCsql = new DatabaseInstance(\"destinationCsql\", DatabaseInstanceArgs.builder()\n .name(\"destination-csql\")\n .databaseVersion(\"MYSQL_5_7\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-n1-standard-1\")\n .deletionProtectionEnabled(false)\n .build())\n .deletionProtection(false)\n .build());\n\n var existing_mysql = new ConnectionProfile(\"existing-mysql\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"destination-cp\")\n 
.displayName(\"destination-cp_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .mysql(ConnectionProfileMysqlArgs.builder()\n .cloudSqlId(\"destination-csql\")\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(destinationCsql)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n destinationCsql:\n type: gcp:sql:DatabaseInstance\n name: destination_csql\n properties:\n name: destination-csql\n databaseVersion: MYSQL_5_7\n settings:\n tier: db-n1-standard-1\n deletionProtectionEnabled: false\n deletionProtection: false\n existing-mysql:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: destination-cp\n displayName: destination-cp_display\n labels:\n foo: bar\n mysql:\n cloudSqlId: destination-csql\n options:\n dependson:\n - ${destinationCsql}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Database Migration Service Connection Profile Existing Postgres\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst destinationCsql = new gcp.sql.DatabaseInstance(\"destination_csql\", {\n name: \"destination-csql\",\n databaseVersion: \"POSTGRES_15\",\n settings: {\n tier: \"db-custom-2-13312\",\n deletionProtectionEnabled: false,\n },\n deletionProtection: false,\n});\nconst existing_psql = new gcp.databasemigrationservice.ConnectionProfile(\"existing-psql\", {\n location: \"us-central1\",\n connectionProfileId: \"destination-cp\",\n displayName: \"destination-cp_display\",\n labels: {\n foo: \"bar\",\n },\n postgresql: {\n cloudSqlId: \"destination-csql\",\n },\n}, {\n dependsOn: [destinationCsql],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\ndestination_csql = gcp.sql.DatabaseInstance(\"destination_csql\",\n name=\"destination-csql\",\n database_version=\"POSTGRES_15\",\n settings={\n \"tier\": \"db-custom-2-13312\",\n \"deletion_protection_enabled\": False,\n },\n deletion_protection=False)\nexisting_psql = gcp.databasemigrationservice.ConnectionProfile(\"existing-psql\",\n location=\"us-central1\",\n connection_profile_id=\"destination-cp\",\n display_name=\"destination-cp_display\",\n labels={\n \"foo\": \"bar\",\n },\n postgresql={\n \"cloud_sql_id\": \"destination-csql\",\n },\n opts = pulumi.ResourceOptions(depends_on=[destination_csql]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var destinationCsql = new Gcp.Sql.DatabaseInstance(\"destination_csql\", new()\n {\n Name = \"destination-csql\",\n DatabaseVersion = \"POSTGRES_15\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-custom-2-13312\",\n DeletionProtectionEnabled = false,\n },\n DeletionProtection = false,\n });\n\n var existing_psql = new Gcp.DatabaseMigrationService.ConnectionProfile(\"existing-psql\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-cp\",\n DisplayName = \"destination-cp_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Postgresql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfilePostgresqlArgs\n {\n CloudSqlId = \"destination-csql\",\n },\n }, new CustomResourceOptions\n {\n 
DependsOn =\n {\n destinationCsql,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationCsql, err := sql.NewDatabaseInstance(ctx, \"destination_csql\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"destination-csql\"),\n\t\t\tDatabaseVersion: pulumi.String(\"POSTGRES_15\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-custom-2-13312\"),\n\t\t\t\tDeletionProtectionEnabled: pulumi.Bool(false),\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databasemigrationservice.NewConnectionProfile(ctx, \"existing-psql\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-cp\"),\n\t\t\tDisplayName: pulumi.String(\"destination-cp_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tPostgresql: \u0026databasemigrationservice.ConnectionProfilePostgresqlArgs{\n\t\t\t\tCloudSqlId: pulumi.String(\"destination-csql\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tdestinationCsql,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfilePostgresqlArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var destinationCsql = new DatabaseInstance(\"destinationCsql\", DatabaseInstanceArgs.builder()\n .name(\"destination-csql\")\n .databaseVersion(\"POSTGRES_15\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-custom-2-13312\")\n .deletionProtectionEnabled(false)\n .build())\n .deletionProtection(false)\n .build());\n\n var existing_psql = new ConnectionProfile(\"existing-psql\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"destination-cp\")\n .displayName(\"destination-cp_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .postgresql(ConnectionProfilePostgresqlArgs.builder()\n .cloudSqlId(\"destination-csql\")\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(destinationCsql)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n 
destinationCsql:\n type: gcp:sql:DatabaseInstance\n name: destination_csql\n properties:\n name: destination-csql\n databaseVersion: POSTGRES_15\n settings:\n tier: db-custom-2-13312\n deletionProtectionEnabled: false\n deletionProtection: false\n existing-psql:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: destination-cp\n displayName: destination-cp_display\n labels:\n foo: bar\n postgresql:\n cloudSqlId: destination-csql\n options:\n dependson:\n - ${destinationCsql}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Database Migration Service Connection Profile Existing Alloydb\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst project = gcp.organizations.getProject({});\nconst _default = new gcp.compute.Network(\"default\", {name: \"destination-alloydb\"});\nconst destinationAlloydb = new gcp.alloydb.Cluster(\"destination_alloydb\", {\n clusterId: \"destination-alloydb\",\n location: \"us-central1\",\n networkConfig: {\n network: _default.id,\n },\n databaseVersion: \"POSTGRES_15\",\n initialUser: {\n user: \"destination-alloydb\",\n password: \"destination-alloydb\",\n },\n});\nconst privateIpAlloc = new gcp.compute.GlobalAddress(\"private_ip_alloc\", {\n name: \"destination-alloydb\",\n addressType: \"INTERNAL\",\n purpose: \"VPC_PEERING\",\n prefixLength: 16,\n network: _default.id,\n});\nconst vpcConnection = new gcp.servicenetworking.Connection(\"vpc_connection\", {\n network: _default.id,\n service: \"servicenetworking.googleapis.com\",\n reservedPeeringRanges: [privateIpAlloc.name],\n});\nconst destinationAlloydbPrimary = new gcp.alloydb.Instance(\"destination_alloydb_primary\", {\n cluster: destinationAlloydb.name,\n instanceId: \"destination-alloydb-primary\",\n instanceType: \"PRIMARY\",\n}, {\n dependsOn: [vpcConnection],\n});\nconst existing_alloydb = new gcp.databasemigrationservice.ConnectionProfile(\"existing-alloydb\", {\n location: \"us-central1\",\n connectionProfileId: \"destination-cp\",\n displayName: \"destination-cp_display\",\n labels: {\n foo: \"bar\",\n },\n postgresql: {\n alloydbClusterId: \"destination-alloydb\",\n },\n}, {\n dependsOn: [\n destinationAlloydb,\n destinationAlloydbPrimary,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject = gcp.organizations.get_project()\ndefault = gcp.compute.Network(\"default\", name=\"destination-alloydb\")\ndestination_alloydb = gcp.alloydb.Cluster(\"destination_alloydb\",\n cluster_id=\"destination-alloydb\",\n location=\"us-central1\",\n network_config={\n \"network\": default.id,\n },\n database_version=\"POSTGRES_15\",\n initial_user={\n \"user\": \"destination-alloydb\",\n \"password\": \"destination-alloydb\",\n })\nprivate_ip_alloc = gcp.compute.GlobalAddress(\"private_ip_alloc\",\n name=\"destination-alloydb\",\n address_type=\"INTERNAL\",\n purpose=\"VPC_PEERING\",\n prefix_length=16,\n network=default.id)\nvpc_connection = gcp.servicenetworking.Connection(\"vpc_connection\",\n network=default.id,\n service=\"servicenetworking.googleapis.com\",\n reserved_peering_ranges=[private_ip_alloc.name])\ndestination_alloydb_primary = gcp.alloydb.Instance(\"destination_alloydb_primary\",\n cluster=destination_alloydb.name,\n instance_id=\"destination-alloydb-primary\",\n instance_type=\"PRIMARY\",\n opts = 
pulumi.ResourceOptions(depends_on=[vpc_connection]))\nexisting_alloydb = gcp.databasemigrationservice.ConnectionProfile(\"existing-alloydb\",\n location=\"us-central1\",\n connection_profile_id=\"destination-cp\",\n display_name=\"destination-cp_display\",\n labels={\n \"foo\": \"bar\",\n },\n postgresql={\n \"alloydb_cluster_id\": \"destination-alloydb\",\n },\n opts = pulumi.ResourceOptions(depends_on=[\n destination_alloydb,\n destination_alloydb_primary,\n ]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var @default = new Gcp.Compute.Network(\"default\", new()\n {\n Name = \"destination-alloydb\",\n });\n\n var destinationAlloydb = new Gcp.Alloydb.Cluster(\"destination_alloydb\", new()\n {\n ClusterId = \"destination-alloydb\",\n Location = \"us-central1\",\n NetworkConfig = new Gcp.Alloydb.Inputs.ClusterNetworkConfigArgs\n {\n Network = @default.Id,\n },\n DatabaseVersion = \"POSTGRES_15\",\n InitialUser = new Gcp.Alloydb.Inputs.ClusterInitialUserArgs\n {\n User = \"destination-alloydb\",\n Password = \"destination-alloydb\",\n },\n });\n\n var privateIpAlloc = new Gcp.Compute.GlobalAddress(\"private_ip_alloc\", new()\n {\n Name = \"destination-alloydb\",\n AddressType = \"INTERNAL\",\n Purpose = \"VPC_PEERING\",\n PrefixLength = 16,\n Network = @default.Id,\n });\n\n var vpcConnection = new Gcp.ServiceNetworking.Connection(\"vpc_connection\", new()\n {\n Network = @default.Id,\n Service = \"servicenetworking.googleapis.com\",\n ReservedPeeringRanges = new[]\n {\n privateIpAlloc.Name,\n },\n });\n\n var destinationAlloydbPrimary = new Gcp.Alloydb.Instance(\"destination_alloydb_primary\", new()\n {\n Cluster = destinationAlloydb.Name,\n InstanceId = \"destination-alloydb-primary\",\n InstanceType = \"PRIMARY\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n vpcConnection,\n },\n });\n\n var existing_alloydb = new Gcp.DatabaseMigrationService.ConnectionProfile(\"existing-alloydb\", new()\n {\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-cp\",\n DisplayName = \"destination-cp_display\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Postgresql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfilePostgresqlArgs\n {\n AlloydbClusterId = \"destination-alloydb\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n destinationAlloydb,\n destinationAlloydbPrimary,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/alloydb\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/servicenetworking\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = compute.NewNetwork(ctx, \"default\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"destination-alloydb\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationAlloydb, err := alloydb.NewCluster(ctx, \"destination_alloydb\", \u0026alloydb.ClusterArgs{\n\t\t\tClusterId: pulumi.String(\"destination-alloydb\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tNetworkConfig: 
\u0026alloydb.ClusterNetworkConfigArgs{\n\t\t\t\tNetwork: _default.ID(),\n\t\t\t},\n\t\t\tDatabaseVersion: pulumi.String(\"POSTGRES_15\"),\n\t\t\tInitialUser: \u0026alloydb.ClusterInitialUserArgs{\n\t\t\t\tUser: pulumi.String(\"destination-alloydb\"),\n\t\t\t\tPassword: pulumi.String(\"destination-alloydb\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tprivateIpAlloc, err := compute.NewGlobalAddress(ctx, \"private_ip_alloc\", \u0026compute.GlobalAddressArgs{\n\t\t\tName: pulumi.String(\"destination-alloydb\"),\n\t\t\tAddressType: pulumi.String(\"INTERNAL\"),\n\t\t\tPurpose: pulumi.String(\"VPC_PEERING\"),\n\t\t\tPrefixLength: pulumi.Int(16),\n\t\t\tNetwork: _default.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvpcConnection, err := servicenetworking.NewConnection(ctx, \"vpc_connection\", \u0026servicenetworking.ConnectionArgs{\n\t\t\tNetwork: _default.ID(),\n\t\t\tService: pulumi.String(\"servicenetworking.googleapis.com\"),\n\t\t\tReservedPeeringRanges: pulumi.StringArray{\n\t\t\t\tprivateIpAlloc.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationAlloydbPrimary, err := alloydb.NewInstance(ctx, \"destination_alloydb_primary\", \u0026alloydb.InstanceArgs{\n\t\t\tCluster: destinationAlloydb.Name,\n\t\t\tInstanceId: pulumi.String(\"destination-alloydb-primary\"),\n\t\t\tInstanceType: pulumi.String(\"PRIMARY\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tvpcConnection,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = databasemigrationservice.NewConnectionProfile(ctx, \"existing-alloydb\", \u0026databasemigrationservice.ConnectionProfileArgs{\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-cp\"),\n\t\t\tDisplayName: pulumi.String(\"destination-cp_display\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tPostgresql: \u0026databasemigrationservice.ConnectionProfilePostgresqlArgs{\n\t\t\t\tAlloydbClusterId: pulumi.String(\"destination-alloydb\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tdestinationAlloydb,\n\t\t\tdestinationAlloydbPrimary,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.alloydb.Cluster;\nimport com.pulumi.gcp.alloydb.ClusterArgs;\nimport com.pulumi.gcp.alloydb.inputs.ClusterNetworkConfigArgs;\nimport com.pulumi.gcp.alloydb.inputs.ClusterInitialUserArgs;\nimport com.pulumi.gcp.compute.GlobalAddress;\nimport com.pulumi.gcp.compute.GlobalAddressArgs;\nimport com.pulumi.gcp.servicenetworking.Connection;\nimport com.pulumi.gcp.servicenetworking.ConnectionArgs;\nimport com.pulumi.gcp.alloydb.Instance;\nimport com.pulumi.gcp.alloydb.InstanceArgs;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfile;\nimport com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;\nimport com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfilePostgresqlArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport 
java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var default_ = new Network(\"default\", NetworkArgs.builder()\n .name(\"destination-alloydb\")\n .build());\n\n var destinationAlloydb = new Cluster(\"destinationAlloydb\", ClusterArgs.builder()\n .clusterId(\"destination-alloydb\")\n .location(\"us-central1\")\n .networkConfig(ClusterNetworkConfigArgs.builder()\n .network(default_.id())\n .build())\n .databaseVersion(\"POSTGRES_15\")\n .initialUser(ClusterInitialUserArgs.builder()\n .user(\"destination-alloydb\")\n .password(\"destination-alloydb\")\n .build())\n .build());\n\n var privateIpAlloc = new GlobalAddress(\"privateIpAlloc\", GlobalAddressArgs.builder()\n .name(\"destination-alloydb\")\n .addressType(\"INTERNAL\")\n .purpose(\"VPC_PEERING\")\n .prefixLength(16)\n .network(default_.id())\n .build());\n\n var vpcConnection = new Connection(\"vpcConnection\", ConnectionArgs.builder()\n .network(default_.id())\n .service(\"servicenetworking.googleapis.com\")\n .reservedPeeringRanges(privateIpAlloc.name())\n .build());\n\n var destinationAlloydbPrimary = new Instance(\"destinationAlloydbPrimary\", InstanceArgs.builder()\n .cluster(destinationAlloydb.name())\n .instanceId(\"destination-alloydb-primary\")\n .instanceType(\"PRIMARY\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(vpcConnection)\n .build());\n\n var existing_alloydb = new ConnectionProfile(\"existing-alloydb\", ConnectionProfileArgs.builder()\n .location(\"us-central1\")\n .connectionProfileId(\"destination-cp\")\n .displayName(\"destination-cp_display\")\n .labels(Map.of(\"foo\", \"bar\"))\n .postgresql(ConnectionProfilePostgresqlArgs.builder()\n .alloydbClusterId(\"destination-alloydb\")\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n destinationAlloydb,\n destinationAlloydbPrimary)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n destinationAlloydb:\n type: gcp:alloydb:Cluster\n name: destination_alloydb\n properties:\n clusterId: destination-alloydb\n location: us-central1\n networkConfig:\n network: ${default.id}\n databaseVersion: POSTGRES_15\n initialUser:\n user: destination-alloydb\n password: destination-alloydb\n destinationAlloydbPrimary:\n type: gcp:alloydb:Instance\n name: destination_alloydb_primary\n properties:\n cluster: ${destinationAlloydb.name}\n instanceId: destination-alloydb-primary\n instanceType: PRIMARY\n options:\n dependson:\n - ${vpcConnection}\n privateIpAlloc:\n type: gcp:compute:GlobalAddress\n name: private_ip_alloc\n properties:\n name: destination-alloydb\n addressType: INTERNAL\n purpose: VPC_PEERING\n prefixLength: 16\n network: ${default.id}\n vpcConnection:\n type: gcp:servicenetworking:Connection\n name: vpc_connection\n properties:\n network: ${default.id}\n service: servicenetworking.googleapis.com\n reservedPeeringRanges:\n - ${privateIpAlloc.name}\n default:\n type: gcp:compute:Network\n properties:\n name: destination-alloydb\n existing-alloydb:\n type: gcp:databasemigrationservice:ConnectionProfile\n properties:\n location: us-central1\n connectionProfileId: destination-cp\n displayName: destination-cp_display\n labels:\n foo: bar\n postgresql:\n alloydbClusterId: destination-alloydb\n options:\n dependson:\n - ${destinationAlloydb}\n - ${destinationAlloydbPrimary}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: 
{}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nConnectionProfile can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}`\n\n* `{{project}}/{{location}}/{{connection_profile_id}}`\n\n* `{{location}}/{{connection_profile_id}}`\n\nWhen using the `pulumi import` command, ConnectionProfile can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:databasemigrationservice/connectionProfile:ConnectionProfile default projects/{{project}}/locations/{{location}}/connectionProfiles/{{connection_profile_id}}\n```\n\n```sh\n$ pulumi import gcp:databasemigrationservice/connectionProfile:ConnectionProfile default {{project}}/{{location}}/{{connection_profile_id}}\n```\n\n```sh\n$ pulumi import gcp:databasemigrationservice/connectionProfile:ConnectionProfile default {{location}}/{{connection_profile_id}}\n```\n\n", "properties": { "alloydb": { "$ref": "#/types/gcp:databasemigrationservice/ConnectionProfileAlloydb:ConnectionProfileAlloydb", @@ -185213,7 +186781,7 @@ } }, "gcp:datastream/stream:Stream": { - "description": "A resource representing streaming data from a source to a destination.\n\n\nTo get more information about Stream, see:\n\n* [API documentation](https://cloud.google.com/datastream/docs/reference/rest/v1/projects.locations.streams)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/datastream/docs/create-a-stream)\n\n## Example Usage\n\n### Datastream Stream Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as random from \"@pulumi/random\";\n\nconst project = gcp.organizations.getProject({});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"my-instance\",\n databaseVersion: \"MYSQL_8_0\",\n region: \"us-central1\",\n settings: {\n tier: \"db-f1-micro\",\n backupConfiguration: {\n enabled: true,\n binaryLogEnabled: true,\n },\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n deletionProtection: true,\n});\nconst db = new gcp.sql.Database(\"db\", {\n instance: instance.name,\n name: \"db\",\n});\nconst pwd = new random.RandomPassword(\"pwd\", {\n length: 16,\n special: false,\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"user\",\n instance: instance.name,\n host: \"%\",\n password: pwd.result,\n});\nconst sourceConnectionProfile = new gcp.datastream.ConnectionProfile(\"source_connection_profile\", {\n displayName: \"Source connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n mysqlProfile: {\n hostname: instance.publicIpAddress,\n username: user.name,\n password: user.password,\n },\n});\nconst bucket = new gcp.storage.Bucket(\"bucket\", {\n name: \"my-bucket\",\n location: \"US\",\n uniformBucketLevelAccess: true,\n});\nconst viewer = new gcp.storage.BucketIAMMember(\"viewer\", {\n bucket: bucket.name,\n role: \"roles/storage.objectViewer\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com`),\n});\nconst creator = new gcp.storage.BucketIAMMember(\"creator\", {\n bucket: bucket.name,\n role: \"roles/storage.objectCreator\",\n member: project.then(project =\u003e 
`serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com`),\n});\nconst reader = new gcp.storage.BucketIAMMember(\"reader\", {\n bucket: bucket.name,\n role: \"roles/storage.legacyBucketReader\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com`),\n});\nconst keyUser = new gcp.kms.CryptoKeyIAMMember(\"key_user\", {\n cryptoKeyId: \"kms-name\",\n role: \"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com`),\n});\nconst destinationConnectionProfile = new gcp.datastream.ConnectionProfile(\"destination_connection_profile\", {\n displayName: \"Connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n gcsProfile: {\n bucket: bucket.name,\n rootPath: \"/path\",\n },\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n streamId: \"my-stream\",\n desiredState: \"NOT_STARTED\",\n location: \"us-central1\",\n displayName: \"my stream\",\n labels: {\n key: \"value\",\n },\n sourceConfig: {\n sourceConnectionProfile: sourceConnectionProfile.id,\n mysqlSourceConfig: {\n includeObjects: {\n mysqlDatabases: [{\n database: \"my-database\",\n mysqlTables: [\n {\n table: \"includedTable\",\n mysqlColumns: [{\n column: \"includedColumn\",\n dataType: \"VARCHAR\",\n collation: \"utf8mb4\",\n primaryKey: false,\n nullable: false,\n ordinalPosition: 0,\n }],\n },\n {\n table: \"includedTable_2\",\n },\n ],\n }],\n },\n excludeObjects: {\n mysqlDatabases: [{\n database: \"my-database\",\n mysqlTables: [{\n table: \"excludedTable\",\n mysqlColumns: [{\n column: \"excludedColumn\",\n dataType: \"VARCHAR\",\n collation: \"utf8mb4\",\n primaryKey: false,\n nullable: false,\n ordinalPosition: 0,\n }],\n }],\n }],\n },\n maxConcurrentCdcTasks: 5,\n },\n },\n destinationConfig: {\n destinationConnectionProfile: destinationConnectionProfile.id,\n gcsDestinationConfig: {\n path: \"mydata\",\n fileRotationMb: 200,\n fileRotationInterval: \"60s\",\n jsonFileFormat: {\n schemaFileFormat: \"NO_SCHEMA_FILE\",\n compression: \"GZIP\",\n },\n },\n },\n backfillAll: {\n mysqlExcludedObjects: {\n mysqlDatabases: [{\n database: \"my-database\",\n mysqlTables: [{\n table: \"excludedTable\",\n mysqlColumns: [{\n column: \"excludedColumn\",\n dataType: \"VARCHAR\",\n collation: \"utf8mb4\",\n primaryKey: false,\n nullable: false,\n ordinalPosition: 0,\n }],\n }],\n }],\n },\n },\n customerManagedEncryptionKey: \"kms-name\",\n}, {\n dependsOn: [keyUser],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_random as random\n\nproject = gcp.organizations.get_project()\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"my-instance\",\n database_version=\"MYSQL_8_0\",\n region=\"us-central1\",\n settings={\n \"tier\": \"db-f1-micro\",\n \"backup_configuration\": {\n \"enabled\": True,\n \"binary_log_enabled\": True,\n },\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n },\n deletion_protection=True)\ndb = gcp.sql.Database(\"db\",\n instance=instance.name,\n name=\"db\")\npwd = random.RandomPassword(\"pwd\",\n length=16,\n special=False)\nuser = gcp.sql.User(\"user\",\n name=\"user\",\n instance=instance.name,\n 
host=\"%\",\n password=pwd.result)\nsource_connection_profile = gcp.datastream.ConnectionProfile(\"source_connection_profile\",\n display_name=\"Source connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n mysql_profile={\n \"hostname\": instance.public_ip_address,\n \"username\": user.name,\n \"password\": user.password,\n })\nbucket = gcp.storage.Bucket(\"bucket\",\n name=\"my-bucket\",\n location=\"US\",\n uniform_bucket_level_access=True)\nviewer = gcp.storage.BucketIAMMember(\"viewer\",\n bucket=bucket.name,\n role=\"roles/storage.objectViewer\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-datastream.iam.gserviceaccount.com\")\ncreator = gcp.storage.BucketIAMMember(\"creator\",\n bucket=bucket.name,\n role=\"roles/storage.objectCreator\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-datastream.iam.gserviceaccount.com\")\nreader = gcp.storage.BucketIAMMember(\"reader\",\n bucket=bucket.name,\n role=\"roles/storage.legacyBucketReader\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-datastream.iam.gserviceaccount.com\")\nkey_user = gcp.kms.CryptoKeyIAMMember(\"key_user\",\n crypto_key_id=\"kms-name\",\n role=\"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-datastream.iam.gserviceaccount.com\")\ndestination_connection_profile = gcp.datastream.ConnectionProfile(\"destination_connection_profile\",\n display_name=\"Connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n gcs_profile={\n \"bucket\": bucket.name,\n \"root_path\": \"/path\",\n })\ndefault = gcp.datastream.Stream(\"default\",\n stream_id=\"my-stream\",\n desired_state=\"NOT_STARTED\",\n location=\"us-central1\",\n display_name=\"my stream\",\n labels={\n \"key\": \"value\",\n },\n source_config={\n \"source_connection_profile\": source_connection_profile.id,\n \"mysql_source_config\": {\n \"include_objects\": {\n \"mysql_databases\": [{\n \"database\": \"my-database\",\n \"mysql_tables\": [\n {\n \"table\": \"includedTable\",\n \"mysql_columns\": [{\n \"column\": \"includedColumn\",\n \"data_type\": \"VARCHAR\",\n \"collation\": \"utf8mb4\",\n \"primary_key\": False,\n \"nullable\": False,\n \"ordinal_position\": 0,\n }],\n },\n {\n \"table\": \"includedTable_2\",\n },\n ],\n }],\n },\n \"exclude_objects\": {\n \"mysql_databases\": [{\n \"database\": \"my-database\",\n \"mysql_tables\": [{\n \"table\": \"excludedTable\",\n \"mysql_columns\": [{\n \"column\": \"excludedColumn\",\n \"data_type\": \"VARCHAR\",\n \"collation\": \"utf8mb4\",\n \"primary_key\": False,\n \"nullable\": False,\n \"ordinal_position\": 0,\n }],\n }],\n }],\n },\n \"max_concurrent_cdc_tasks\": 5,\n },\n },\n destination_config={\n \"destination_connection_profile\": destination_connection_profile.id,\n \"gcs_destination_config\": {\n \"path\": \"mydata\",\n \"file_rotation_mb\": 200,\n \"file_rotation_interval\": \"60s\",\n \"json_file_format\": {\n \"schema_file_format\": \"NO_SCHEMA_FILE\",\n \"compression\": \"GZIP\",\n },\n },\n },\n backfill_all={\n \"mysql_excluded_objects\": {\n \"mysql_databases\": [{\n \"database\": \"my-database\",\n \"mysql_tables\": [{\n \"table\": \"excludedTable\",\n \"mysql_columns\": [{\n \"column\": \"excludedColumn\",\n \"data_type\": \"VARCHAR\",\n \"collation\": \"utf8mb4\",\n \"primary_key\": False,\n \"nullable\": False,\n \"ordinal_position\": 0,\n }],\n }],\n }],\n },\n },\n customer_managed_encryption_key=\"kms-name\",\n opts = 
pulumi.ResourceOptions(depends_on=[key_user]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"my-instance\",\n DatabaseVersion = \"MYSQL_8_0\",\n Region = \"us-central1\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n BackupConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsBackupConfigurationArgs\n {\n Enabled = true,\n BinaryLogEnabled = true,\n },\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n DeletionProtection = true,\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Instance = instance.Name,\n Name = \"db\",\n });\n\n var pwd = new Random.RandomPassword(\"pwd\", new()\n {\n Length = 16,\n Special = false,\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"user\",\n Instance = instance.Name,\n Host = \"%\",\n Password = pwd.Result,\n });\n\n var sourceConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"source_connection_profile\", new()\n {\n DisplayName = \"Source connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n MysqlProfile = new Gcp.Datastream.Inputs.ConnectionProfileMysqlProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Username = user.Name,\n Password = user.Password,\n },\n });\n\n var bucket = new Gcp.Storage.Bucket(\"bucket\", new()\n {\n Name = \"my-bucket\",\n Location = \"US\",\n UniformBucketLevelAccess = true,\n });\n\n var viewer = new Gcp.Storage.BucketIAMMember(\"viewer\", new()\n {\n Bucket = bucket.Name,\n Role = \"roles/storage.objectViewer\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-datastream.iam.gserviceaccount.com\",\n });\n\n var creator = new Gcp.Storage.BucketIAMMember(\"creator\", new()\n {\n Bucket = bucket.Name,\n Role = \"roles/storage.objectCreator\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-datastream.iam.gserviceaccount.com\",\n });\n\n var reader = new Gcp.Storage.BucketIAMMember(\"reader\", new()\n {\n Bucket = bucket.Name,\n Role = \"roles/storage.legacyBucketReader\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-datastream.iam.gserviceaccount.com\",\n });\n\n var keyUser = new Gcp.Kms.CryptoKeyIAMMember(\"key_user\", new()\n {\n CryptoKeyId = \"kms-name\",\n Role = \"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e 
getProjectResult.Number)}@gcp-sa-datastream.iam.gserviceaccount.com\",\n });\n\n var destinationConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"destination_connection_profile\", new()\n {\n DisplayName = \"Connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n GcsProfile = new Gcp.Datastream.Inputs.ConnectionProfileGcsProfileArgs\n {\n Bucket = bucket.Name,\n RootPath = \"/path\",\n },\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n StreamId = \"my-stream\",\n DesiredState = \"NOT_STARTED\",\n Location = \"us-central1\",\n DisplayName = \"my stream\",\n Labels = \n {\n { \"key\", \"value\" },\n },\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = sourceConnectionProfile.Id,\n MysqlSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigArgs\n {\n IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsArgs\n {\n MysqlDatabases = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseArgs\n {\n Database = \"my-database\",\n MysqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs\n {\n Table = \"includedTable\",\n MysqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs\n {\n Column = \"includedColumn\",\n DataType = \"VARCHAR\",\n Collation = \"utf8mb4\",\n PrimaryKey = false,\n Nullable = false,\n OrdinalPosition = 0,\n },\n },\n },\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs\n {\n Table = \"includedTable_2\",\n },\n },\n },\n },\n },\n ExcludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsArgs\n {\n MysqlDatabases = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseArgs\n {\n Database = \"my-database\",\n MysqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableArgs\n {\n Table = \"excludedTable\",\n MysqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs\n {\n Column = \"excludedColumn\",\n DataType = \"VARCHAR\",\n Collation = \"utf8mb4\",\n PrimaryKey = false,\n Nullable = false,\n OrdinalPosition = 0,\n },\n },\n },\n },\n },\n },\n },\n MaxConcurrentCdcTasks = 5,\n },\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destinationConnectionProfile.Id,\n GcsDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigGcsDestinationConfigArgs\n {\n Path = \"mydata\",\n FileRotationMb = 200,\n FileRotationInterval = \"60s\",\n JsonFileFormat = new Gcp.Datastream.Inputs.StreamDestinationConfigGcsDestinationConfigJsonFileFormatArgs\n {\n SchemaFileFormat = \"NO_SCHEMA_FILE\",\n Compression = \"GZIP\",\n },\n },\n },\n BackfillAll = new Gcp.Datastream.Inputs.StreamBackfillAllArgs\n {\n MysqlExcludedObjects = new Gcp.Datastream.Inputs.StreamBackfillAllMysqlExcludedObjectsArgs\n {\n MysqlDatabases = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseArgs\n {\n Database = \"my-database\",\n MysqlTables = new[]\n {\n new 
Gcp.Datastream.Inputs.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableArgs\n {\n Table = \"excludedTable\",\n MysqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumnArgs\n {\n Column = \"excludedColumn\",\n DataType = \"VARCHAR\",\n Collation = \"utf8mb4\",\n PrimaryKey = false,\n Nullable = false,\n OrdinalPosition = 0,\n },\n },\n },\n },\n },\n },\n },\n },\n CustomerManagedEncryptionKey = \"kms-name\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n keyUser,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-instance\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_8_0\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-f1-micro\"),\n\t\t\t\tBackupConfiguration: \u0026sql.DatabaseInstanceSettingsBackupConfigurationArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tBinaryLogEnabled: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tInstance: instance.Name,\n\t\t\tName: pulumi.String(\"db\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpwd, err := random.NewRandomPassword(ctx, \"pwd\", \u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tHost: pulumi.String(\"%\"),\n\t\t\tPassword: pwd.Result,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsourceConnectionProfile, err := datastream.NewConnectionProfile(ctx, 
\"source_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Source connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tMysqlProfile: \u0026datastream.ConnectionProfileMysqlProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbucket, err := storage.NewBucket(ctx, \"bucket\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"my-bucket\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tUniformBucketLevelAccess: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = storage.NewBucketIAMMember(ctx, \"viewer\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: bucket.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.objectViewer\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-datastream.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = storage.NewBucketIAMMember(ctx, \"creator\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: bucket.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.objectCreator\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-datastream.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = storage.NewBucketIAMMember(ctx, \"reader\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: bucket.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.legacyBucketReader\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-datastream.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkeyUser, err := kms.NewCryptoKeyIAMMember(ctx, \"key_user\", \u0026kms.CryptoKeyIAMMemberArgs{\n\t\t\tCryptoKeyId: pulumi.String(\"kms-name\"),\n\t\t\tRole: pulumi.String(\"roles/cloudkms.cryptoKeyEncrypterDecrypter\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-datastream.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"destination_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tGcsProfile: \u0026datastream.ConnectionProfileGcsProfileArgs{\n\t\t\t\tBucket: bucket.Name,\n\t\t\t\tRootPath: pulumi.String(\"/path\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tDesiredState: pulumi.String(\"NOT_STARTED\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDisplayName: pulumi.String(\"my stream\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"key\": pulumi.String(\"value\"),\n\t\t\t},\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: sourceConnectionProfile.ID(),\n\t\t\t\tMysqlSourceConfig: \u0026datastream.StreamSourceConfigMysqlSourceConfigArgs{\n\t\t\t\t\tIncludeObjects: \u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsArgs{\n\t\t\t\t\t\tMysqlDatabases: 
datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseArgs{\n\t\t\t\t\t\t\t\tDatabase: pulumi.String(\"my-database\"),\n\t\t\t\t\t\t\t\tMysqlTables: datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"includedTable\"),\n\t\t\t\t\t\t\t\t\t\tMysqlColumns: datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"includedColumn\"),\n\t\t\t\t\t\t\t\t\t\t\t\tDataType: pulumi.String(\"VARCHAR\"),\n\t\t\t\t\t\t\t\t\t\t\t\tCollation: pulumi.String(\"utf8mb4\"),\n\t\t\t\t\t\t\t\t\t\t\t\tPrimaryKey: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\t\tNullable: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\t\tOrdinalPosition: pulumi.Int(0),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"includedTable_2\"),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tExcludeObjects: \u0026datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsArgs{\n\t\t\t\t\t\tMysqlDatabases: datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseArgs{\n\t\t\t\t\t\t\t\tDatabase: pulumi.String(\"my-database\"),\n\t\t\t\t\t\t\t\tMysqlTables: datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"excludedTable\"),\n\t\t\t\t\t\t\t\t\t\tMysqlColumns: datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"excludedColumn\"),\n\t\t\t\t\t\t\t\t\t\t\t\tDataType: pulumi.String(\"VARCHAR\"),\n\t\t\t\t\t\t\t\t\t\t\t\tCollation: pulumi.String(\"utf8mb4\"),\n\t\t\t\t\t\t\t\t\t\t\t\tPrimaryKey: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\t\tNullable: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\t\tOrdinalPosition: pulumi.Int(0),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tMaxConcurrentCdcTasks: pulumi.Int(5),\n\t\t\t\t},\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destinationConnectionProfile.ID(),\n\t\t\t\tGcsDestinationConfig: \u0026datastream.StreamDestinationConfigGcsDestinationConfigArgs{\n\t\t\t\t\tPath: pulumi.String(\"mydata\"),\n\t\t\t\t\tFileRotationMb: pulumi.Int(200),\n\t\t\t\t\tFileRotationInterval: pulumi.String(\"60s\"),\n\t\t\t\t\tJsonFileFormat: \u0026datastream.StreamDestinationConfigGcsDestinationConfigJsonFileFormatArgs{\n\t\t\t\t\t\tSchemaFileFormat: 
pulumi.String(\"NO_SCHEMA_FILE\"),\n\t\t\t\t\t\tCompression: pulumi.String(\"GZIP\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillAll: \u0026datastream.StreamBackfillAllArgs{\n\t\t\t\tMysqlExcludedObjects: \u0026datastream.StreamBackfillAllMysqlExcludedObjectsArgs{\n\t\t\t\t\tMysqlDatabases: datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseArray{\n\t\t\t\t\t\t\u0026datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseArgs{\n\t\t\t\t\t\t\tDatabase: pulumi.String(\"my-database\"),\n\t\t\t\t\t\t\tMysqlTables: datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableArray{\n\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableArgs{\n\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"excludedTable\"),\n\t\t\t\t\t\t\t\t\tMysqlColumns: datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"excludedColumn\"),\n\t\t\t\t\t\t\t\t\t\t\tDataType: pulumi.String(\"VARCHAR\"),\n\t\t\t\t\t\t\t\t\t\t\tCollation: pulumi.String(\"utf8mb4\"),\n\t\t\t\t\t\t\t\t\t\t\tPrimaryKey: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\tNullable: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\tOrdinalPosition: pulumi.Int(0),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tCustomerManagedEncryptionKey: pulumi.String(\"kms-name\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tkeyUser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsBackupConfigurationArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileMysqlProfileArgs;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport com.pulumi.gcp.storage.BucketIAMMember;\nimport com.pulumi.gcp.storage.BucketIAMMemberArgs;\nimport com.pulumi.gcp.kms.CryptoKeyIAMMember;\nimport com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileGcsProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsArgs;\nimport 
com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigGcsDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigGcsDestinationConfigJsonFileFormatArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllMysqlExcludedObjectsArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"my-instance\")\n .databaseVersion(\"MYSQL_8_0\")\n .region(\"us-central1\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .backupConfiguration(DatabaseInstanceSettingsBackupConfigurationArgs.builder()\n .enabled(true)\n .binaryLogEnabled(true)\n .build())\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .deletionProtection(true)\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .instance(instance.name())\n .name(\"db\")\n .build());\n\n var pwd = new RandomPassword(\"pwd\", RandomPasswordArgs.builder()\n .length(16)\n .special(false)\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"user\")\n .instance(instance.name())\n .host(\"%\")\n .password(pwd.result())\n .build());\n\n var sourceConnectionProfile = new ConnectionProfile(\"sourceConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Source connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .mysqlProfile(ConnectionProfileMysqlProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .username(user.name())\n .password(user.password())\n .build())\n .build());\n\n var bucket = new Bucket(\"bucket\", BucketArgs.builder()\n .name(\"my-bucket\")\n .location(\"US\")\n .uniformBucketLevelAccess(true)\n .build());\n\n var viewer = new BucketIAMMember(\"viewer\", BucketIAMMemberArgs.builder()\n .bucket(bucket.name())\n .role(\"roles/storage.objectViewer\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-datastream.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var creator = new BucketIAMMember(\"creator\", BucketIAMMemberArgs.builder()\n .bucket(bucket.name())\n .role(\"roles/storage.objectCreator\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-datastream.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var reader = new 
BucketIAMMember(\"reader\", BucketIAMMemberArgs.builder()\n .bucket(bucket.name())\n .role(\"roles/storage.legacyBucketReader\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-datastream.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var keyUser = new CryptoKeyIAMMember(\"keyUser\", CryptoKeyIAMMemberArgs.builder()\n .cryptoKeyId(\"kms-name\")\n .role(\"roles/cloudkms.cryptoKeyEncrypterDecrypter\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-datastream.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var destinationConnectionProfile = new ConnectionProfile(\"destinationConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .gcsProfile(ConnectionProfileGcsProfileArgs.builder()\n .bucket(bucket.name())\n .rootPath(\"/path\")\n .build())\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .streamId(\"my-stream\")\n .desiredState(\"NOT_STARTED\")\n .location(\"us-central1\")\n .displayName(\"my stream\")\n .labels(Map.of(\"key\", \"value\"))\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(sourceConnectionProfile.id())\n .mysqlSourceConfig(StreamSourceConfigMysqlSourceConfigArgs.builder()\n .includeObjects(StreamSourceConfigMysqlSourceConfigIncludeObjectsArgs.builder()\n .mysqlDatabases(StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseArgs.builder()\n .database(\"my-database\")\n .mysqlTables( \n StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs.builder()\n .table(\"includedTable\")\n .mysqlColumns(StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs.builder()\n .column(\"includedColumn\")\n .dataType(\"VARCHAR\")\n .collation(\"utf8mb4\")\n .primaryKey(false)\n .nullable(false)\n .ordinalPosition(0)\n .build())\n .build(),\n StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs.builder()\n .table(\"includedTable_2\")\n .build())\n .build())\n .build())\n .excludeObjects(StreamSourceConfigMysqlSourceConfigExcludeObjectsArgs.builder()\n .mysqlDatabases(StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseArgs.builder()\n .database(\"my-database\")\n .mysqlTables(StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableArgs.builder()\n .table(\"excludedTable\")\n .mysqlColumns(StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs.builder()\n .column(\"excludedColumn\")\n .dataType(\"VARCHAR\")\n .collation(\"utf8mb4\")\n .primaryKey(false)\n .nullable(false)\n .ordinalPosition(0)\n .build())\n .build())\n .build())\n .build())\n .maxConcurrentCdcTasks(5)\n .build())\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destinationConnectionProfile.id())\n .gcsDestinationConfig(StreamDestinationConfigGcsDestinationConfigArgs.builder()\n .path(\"mydata\")\n .fileRotationMb(200)\n .fileRotationInterval(\"60s\")\n .jsonFileFormat(StreamDestinationConfigGcsDestinationConfigJsonFileFormatArgs.builder()\n .schemaFileFormat(\"NO_SCHEMA_FILE\")\n .compression(\"GZIP\")\n .build())\n .build())\n .build())\n .backfillAll(StreamBackfillAllArgs.builder()\n .mysqlExcludedObjects(StreamBackfillAllMysqlExcludedObjectsArgs.builder()\n 
.mysqlDatabases(StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseArgs.builder()\n .database(\"my-database\")\n .mysqlTables(StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableArgs.builder()\n .table(\"excludedTable\")\n .mysqlColumns(StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumnArgs.builder()\n .column(\"excludedColumn\")\n .dataType(\"VARCHAR\")\n .collation(\"utf8mb4\")\n .primaryKey(false)\n .nullable(false)\n .ordinalPosition(0)\n .build())\n .build())\n .build())\n .build())\n .build())\n .customerManagedEncryptionKey(\"kms-name\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(keyUser)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-instance\n databaseVersion: MYSQL_8_0\n region: us-central1\n settings:\n tier: db-f1-micro\n backupConfiguration:\n enabled: true\n binaryLogEnabled: true\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n deletionProtection: true\n db:\n type: gcp:sql:Database\n properties:\n instance: ${instance.name}\n name: db\n pwd:\n type: random:RandomPassword\n properties:\n length: 16\n special: false\n user:\n type: gcp:sql:User\n properties:\n name: user\n instance: ${instance.name}\n host: '%'\n password: ${pwd.result}\n sourceConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: source_connection_profile\n properties:\n displayName: Source connection profile\n location: us-central1\n connectionProfileId: source-profile\n mysqlProfile:\n hostname: ${instance.publicIpAddress}\n username: ${user.name}\n password: ${user.password}\n bucket:\n type: gcp:storage:Bucket\n properties:\n name: my-bucket\n location: US\n uniformBucketLevelAccess: true\n viewer:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${bucket.name}\n role: roles/storage.objectViewer\n member: serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com\n creator:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${bucket.name}\n role: roles/storage.objectCreator\n member: serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com\n reader:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${bucket.name}\n role: roles/storage.legacyBucketReader\n member: serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com\n keyUser:\n type: gcp:kms:CryptoKeyIAMMember\n name: key_user\n properties:\n cryptoKeyId: kms-name\n role: roles/cloudkms.cryptoKeyEncrypterDecrypter\n member: serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com\n destinationConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: destination_connection_profile\n properties:\n displayName: Connection profile\n location: us-central1\n connectionProfileId: destination-profile\n gcsProfile:\n bucket: ${bucket.name}\n rootPath: /path\n default:\n type: gcp:datastream:Stream\n properties:\n streamId: my-stream\n desiredState: NOT_STARTED\n location: us-central1\n displayName: my stream\n labels:\n key: value\n sourceConfig:\n sourceConnectionProfile: ${sourceConnectionProfile.id}\n mysqlSourceConfig:\n includeObjects:\n mysqlDatabases:\n - database: my-database\n mysqlTables:\n - table: includedTable\n mysqlColumns:\n - column: includedColumn\n dataType: VARCHAR\n collation: utf8mb4\n primaryKey: false\n nullable: false\n ordinalPosition: 0\n - table: 
includedTable_2\n excludeObjects:\n mysqlDatabases:\n - database: my-database\n mysqlTables:\n - table: excludedTable\n mysqlColumns:\n - column: excludedColumn\n dataType: VARCHAR\n collation: utf8mb4\n primaryKey: false\n nullable: false\n ordinalPosition: 0\n maxConcurrentCdcTasks: 5\n destinationConfig:\n destinationConnectionProfile: ${destinationConnectionProfile.id}\n gcsDestinationConfig:\n path: mydata\n fileRotationMb: 200\n fileRotationInterval: 60s\n jsonFileFormat:\n schemaFileFormat: NO_SCHEMA_FILE\n compression: GZIP\n backfillAll:\n mysqlExcludedObjects:\n mysqlDatabases:\n - database: my-database\n mysqlTables:\n - table: excludedTable\n mysqlColumns:\n - column: excludedColumn\n dataType: VARCHAR\n collation: utf8mb4\n primaryKey: false\n nullable: false\n ordinalPosition: 0\n customerManagedEncryptionKey: kms-name\n options:\n dependson:\n - ${keyUser}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Postgresql\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst source = new gcp.datastream.ConnectionProfile(\"source\", {\n displayName: \"Postgresql Source\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n postgresqlProfile: {\n hostname: \"hostname\",\n port: 3306,\n username: \"user\",\n password: \"pass\",\n database: \"postgres\",\n },\n});\nconst destination = new gcp.datastream.ConnectionProfile(\"destination\", {\n displayName: \"BigQuery Destination\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n displayName: \"Postgres to BigQuery\",\n location: \"us-central1\",\n streamId: \"my-stream\",\n desiredState: \"RUNNING\",\n sourceConfig: {\n sourceConnectionProfile: source.id,\n postgresqlSourceConfig: {\n maxConcurrentBackfillTasks: 12,\n publication: \"publication\",\n replicationSlot: \"replication_slot\",\n includeObjects: {\n postgresqlSchemas: [{\n schema: \"schema\",\n postgresqlTables: [{\n table: \"table\",\n postgresqlColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n excludeObjects: {\n postgresqlSchemas: [{\n schema: \"schema\",\n postgresqlTables: [{\n table: \"table\",\n postgresqlColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n },\n },\n destinationConfig: {\n destinationConnectionProfile: destination.id,\n bigqueryDestinationConfig: {\n dataFreshness: \"900s\",\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n },\n },\n },\n },\n backfillAll: {\n postgresqlExcludedObjects: {\n postgresqlSchemas: [{\n schema: \"schema\",\n postgresqlTables: [{\n table: \"table\",\n postgresqlColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nsource = gcp.datastream.ConnectionProfile(\"source\",\n display_name=\"Postgresql Source\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n postgresql_profile={\n \"hostname\": \"hostname\",\n \"port\": 3306,\n \"username\": \"user\",\n \"password\": \"pass\",\n \"database\": \"postgres\",\n })\ndestination = gcp.datastream.ConnectionProfile(\"destination\",\n display_name=\"BigQuery Destination\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\ndefault = 
gcp.datastream.Stream(\"default\",\n display_name=\"Postgres to BigQuery\",\n location=\"us-central1\",\n stream_id=\"my-stream\",\n desired_state=\"RUNNING\",\n source_config={\n \"source_connection_profile\": source.id,\n \"postgresql_source_config\": {\n \"max_concurrent_backfill_tasks\": 12,\n \"publication\": \"publication\",\n \"replication_slot\": \"replication_slot\",\n \"include_objects\": {\n \"postgresql_schemas\": [{\n \"schema\": \"schema\",\n \"postgresql_tables\": [{\n \"table\": \"table\",\n \"postgresql_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n \"exclude_objects\": {\n \"postgresql_schemas\": [{\n \"schema\": \"schema\",\n \"postgresql_tables\": [{\n \"table\": \"table\",\n \"postgresql_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n },\n },\n destination_config={\n \"destination_connection_profile\": destination.id,\n \"bigquery_destination_config\": {\n \"data_freshness\": \"900s\",\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n },\n },\n },\n },\n backfill_all={\n \"postgresql_excluded_objects\": {\n \"postgresql_schemas\": [{\n \"schema\": \"schema\",\n \"postgresql_tables\": [{\n \"table\": \"table\",\n \"postgresql_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var source = new Gcp.Datastream.ConnectionProfile(\"source\", new()\n {\n DisplayName = \"Postgresql Source\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n PostgresqlProfile = new Gcp.Datastream.Inputs.ConnectionProfilePostgresqlProfileArgs\n {\n Hostname = \"hostname\",\n Port = 3306,\n Username = \"user\",\n Password = \"pass\",\n Database = \"postgres\",\n },\n });\n\n var destination = new Gcp.Datastream.ConnectionProfile(\"destination\", new()\n {\n DisplayName = \"BigQuery Destination\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n DisplayName = \"Postgres to BigQuery\",\n Location = \"us-central1\",\n StreamId = \"my-stream\",\n DesiredState = \"RUNNING\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = source.Id,\n PostgresqlSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigArgs\n {\n MaxConcurrentBackfillTasks = 12,\n Publication = \"publication\",\n ReplicationSlot = \"replication_slot\",\n IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsArgs\n {\n PostgresqlSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaArgs\n {\n Schema = \"schema\",\n PostgresqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTableArgs\n {\n Table = \"table\",\n PostgresqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n ExcludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsArgs\n {\n PostgresqlSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaArgs\n 
{\n Schema = \"schema\",\n PostgresqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTableArgs\n {\n Table = \"table\",\n PostgresqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n },\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destination.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n DataFreshness = \"900s\",\n SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n },\n },\n },\n },\n BackfillAll = new Gcp.Datastream.Inputs.StreamBackfillAllArgs\n {\n PostgresqlExcludedObjects = new Gcp.Datastream.Inputs.StreamBackfillAllPostgresqlExcludedObjectsArgs\n {\n PostgresqlSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaArgs\n {\n Schema = \"schema\",\n PostgresqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTableArgs\n {\n Table = \"table\",\n PostgresqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsource, err := datastream.NewConnectionProfile(ctx, \"source\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Postgresql Source\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tPostgresqlProfile: \u0026datastream.ConnectionProfilePostgresqlProfileArgs{\n\t\t\t\tHostname: pulumi.String(\"hostname\"),\n\t\t\t\tPort: pulumi.Int(3306),\n\t\t\t\tUsername: pulumi.String(\"user\"),\n\t\t\t\tPassword: pulumi.String(\"pass\"),\n\t\t\t\tDatabase: pulumi.String(\"postgres\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestination, err := datastream.NewConnectionProfile(ctx, \"destination\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"BigQuery Destination\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tDisplayName: pulumi.String(\"Postgres to BigQuery\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tDesiredState: pulumi.String(\"RUNNING\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: source.ID(),\n\t\t\t\tPostgresqlSourceConfig: \u0026datastream.StreamSourceConfigPostgresqlSourceConfigArgs{\n\t\t\t\t\tMaxConcurrentBackfillTasks: 
pulumi.Int(12),\n\t\t\t\t\tPublication: pulumi.String(\"publication\"),\n\t\t\t\t\tReplicationSlot: pulumi.String(\"replication_slot\"),\n\t\t\t\t\tIncludeObjects: \u0026datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsArgs{\n\t\t\t\t\t\tPostgresqlSchemas: datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tPostgresqlTables: datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t\tPostgresqlColumns: datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tExcludeObjects: \u0026datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsArgs{\n\t\t\t\t\t\tPostgresqlSchemas: datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tPostgresqlTables: datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t\tPostgresqlColumns: datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destination.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tDataFreshness: pulumi.String(\"900s\"),\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillAll: \u0026datastream.StreamBackfillAllArgs{\n\t\t\t\tPostgresqlExcludedObjects: \u0026datastream.StreamBackfillAllPostgresqlExcludedObjectsArgs{\n\t\t\t\t\tPostgresqlSchemas: 
datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaArray{\n\t\t\t\t\t\t\u0026datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaArgs{\n\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\tPostgresqlTables: datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTableArray{\n\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTableArgs{\n\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\tPostgresqlColumns: datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfilePostgresqlProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigPostgresqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllPostgresqlExcludedObjectsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var source = new ConnectionProfile(\"source\", ConnectionProfileArgs.builder()\n .displayName(\"Postgresql Source\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .postgresqlProfile(ConnectionProfilePostgresqlProfileArgs.builder()\n .hostname(\"hostname\")\n .port(3306)\n .username(\"user\")\n .password(\"pass\")\n .database(\"postgres\")\n .build())\n .build());\n\n var destination = new ConnectionProfile(\"destination\", ConnectionProfileArgs.builder()\n .displayName(\"BigQuery Destination\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .displayName(\"Postgres to BigQuery\")\n .location(\"us-central1\")\n 
.streamId(\"my-stream\")\n .desiredState(\"RUNNING\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(source.id())\n .postgresqlSourceConfig(StreamSourceConfigPostgresqlSourceConfigArgs.builder()\n .maxConcurrentBackfillTasks(12)\n .publication(\"publication\")\n .replicationSlot(\"replication_slot\")\n .includeObjects(StreamSourceConfigPostgresqlSourceConfigIncludeObjectsArgs.builder()\n .postgresqlSchemas(StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaArgs.builder()\n .schema(\"schema\")\n .postgresqlTables(StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTableArgs.builder()\n .table(\"table\")\n .postgresqlColumns(StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .excludeObjects(StreamSourceConfigPostgresqlSourceConfigExcludeObjectsArgs.builder()\n .postgresqlSchemas(StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaArgs.builder()\n .schema(\"schema\")\n .postgresqlTables(StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTableArgs.builder()\n .table(\"table\")\n .postgresqlColumns(StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .build())\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destination.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .dataFreshness(\"900s\")\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .build())\n .build())\n .build())\n .build())\n .backfillAll(StreamBackfillAllArgs.builder()\n .postgresqlExcludedObjects(StreamBackfillAllPostgresqlExcludedObjectsArgs.builder()\n .postgresqlSchemas(StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaArgs.builder()\n .schema(\"schema\")\n .postgresqlTables(StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTableArgs.builder()\n .table(\"table\")\n .postgresqlColumns(StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n source:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: Postgresql Source\n location: us-central1\n connectionProfileId: source-profile\n postgresqlProfile:\n hostname: hostname\n port: 3306\n username: user\n password: pass\n database: postgres\n destination:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: BigQuery Destination\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n default:\n type: gcp:datastream:Stream\n properties:\n displayName: Postgres to BigQuery\n location: us-central1\n streamId: my-stream\n desiredState: RUNNING\n sourceConfig:\n sourceConnectionProfile: ${source.id}\n postgresqlSourceConfig:\n maxConcurrentBackfillTasks: 12\n publication: publication\n replicationSlot: replication_slot\n includeObjects:\n postgresqlSchemas:\n - schema: schema\n postgresqlTables:\n - table: table\n 
postgresqlColumns:\n - column: column\n excludeObjects:\n postgresqlSchemas:\n - schema: schema\n postgresqlTables:\n - table: table\n postgresqlColumns:\n - column: column\n destinationConfig:\n destinationConnectionProfile: ${destination.id}\n bigqueryDestinationConfig:\n dataFreshness: 900s\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n backfillAll:\n postgresqlExcludedObjects:\n postgresqlSchemas:\n - schema: schema\n postgresqlTables:\n - table: table\n postgresqlColumns:\n - column: column\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Oracle\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst source = new gcp.datastream.ConnectionProfile(\"source\", {\n displayName: \"Oracle Source\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n oracleProfile: {\n hostname: \"hostname\",\n port: 1521,\n username: \"user\",\n password: \"pass\",\n databaseService: \"ORCL\",\n },\n});\nconst destination = new gcp.datastream.ConnectionProfile(\"destination\", {\n displayName: \"BigQuery Destination\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst stream5 = new gcp.datastream.Stream(\"stream5\", {\n displayName: \"Oracle to BigQuery\",\n location: \"us-central1\",\n streamId: \"my-stream\",\n desiredState: \"RUNNING\",\n sourceConfig: {\n sourceConnectionProfile: source.id,\n oracleSourceConfig: {\n maxConcurrentCdcTasks: 8,\n maxConcurrentBackfillTasks: 12,\n includeObjects: {\n oracleSchemas: [{\n schema: \"schema\",\n oracleTables: [{\n table: \"table\",\n oracleColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n excludeObjects: {\n oracleSchemas: [{\n schema: \"schema\",\n oracleTables: [{\n table: \"table\",\n oracleColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n dropLargeObjects: {},\n },\n },\n destinationConfig: {\n destinationConnectionProfile: destination.id,\n bigqueryDestinationConfig: {\n dataFreshness: \"900s\",\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n },\n },\n },\n },\n backfillAll: {\n oracleExcludedObjects: {\n oracleSchemas: [{\n schema: \"schema\",\n oracleTables: [{\n table: \"table\",\n oracleColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nsource = gcp.datastream.ConnectionProfile(\"source\",\n display_name=\"Oracle Source\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n oracle_profile={\n \"hostname\": \"hostname\",\n \"port\": 1521,\n \"username\": \"user\",\n \"password\": \"pass\",\n \"database_service\": \"ORCL\",\n })\ndestination = gcp.datastream.ConnectionProfile(\"destination\",\n display_name=\"BigQuery Destination\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\nstream5 = gcp.datastream.Stream(\"stream5\",\n display_name=\"Oracle to BigQuery\",\n location=\"us-central1\",\n stream_id=\"my-stream\",\n desired_state=\"RUNNING\",\n source_config={\n \"source_connection_profile\": source.id,\n \"oracle_source_config\": {\n \"max_concurrent_cdc_tasks\": 8,\n \"max_concurrent_backfill_tasks\": 12,\n \"include_objects\": {\n \"oracle_schemas\": [{\n \"schema\": \"schema\",\n \"oracle_tables\": [{\n \"table\": \"table\",\n \"oracle_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n \"exclude_objects\": {\n 
\"oracle_schemas\": [{\n \"schema\": \"schema\",\n \"oracle_tables\": [{\n \"table\": \"table\",\n \"oracle_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n \"drop_large_objects\": {},\n },\n },\n destination_config={\n \"destination_connection_profile\": destination.id,\n \"bigquery_destination_config\": {\n \"data_freshness\": \"900s\",\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n },\n },\n },\n },\n backfill_all={\n \"oracle_excluded_objects\": {\n \"oracle_schemas\": [{\n \"schema\": \"schema\",\n \"oracle_tables\": [{\n \"table\": \"table\",\n \"oracle_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var source = new Gcp.Datastream.ConnectionProfile(\"source\", new()\n {\n DisplayName = \"Oracle Source\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n OracleProfile = new Gcp.Datastream.Inputs.ConnectionProfileOracleProfileArgs\n {\n Hostname = \"hostname\",\n Port = 1521,\n Username = \"user\",\n Password = \"pass\",\n DatabaseService = \"ORCL\",\n },\n });\n\n var destination = new Gcp.Datastream.ConnectionProfile(\"destination\", new()\n {\n DisplayName = \"BigQuery Destination\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var stream5 = new Gcp.Datastream.Stream(\"stream5\", new()\n {\n DisplayName = \"Oracle to BigQuery\",\n Location = \"us-central1\",\n StreamId = \"my-stream\",\n DesiredState = \"RUNNING\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = source.Id,\n OracleSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigArgs\n {\n MaxConcurrentCdcTasks = 8,\n MaxConcurrentBackfillTasks = 12,\n IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsArgs\n {\n OracleSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaArgs\n {\n Schema = \"schema\",\n OracleTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableArgs\n {\n Table = \"table\",\n OracleColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n ExcludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsArgs\n {\n OracleSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaArgs\n {\n Schema = \"schema\",\n OracleTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableArgs\n {\n Table = \"table\",\n OracleColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n DropLargeObjects = null,\n },\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destination.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n DataFreshness = \"900s\",\n SourceHierarchyDatasets = new 
Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n },\n },\n },\n },\n BackfillAll = new Gcp.Datastream.Inputs.StreamBackfillAllArgs\n {\n OracleExcludedObjects = new Gcp.Datastream.Inputs.StreamBackfillAllOracleExcludedObjectsArgs\n {\n OracleSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllOracleExcludedObjectsOracleSchemaArgs\n {\n Schema = \"schema\",\n OracleTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableArgs\n {\n Table = \"table\",\n OracleColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsource, err := datastream.NewConnectionProfile(ctx, \"source\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Oracle Source\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tOracleProfile: \u0026datastream.ConnectionProfileOracleProfileArgs{\n\t\t\t\tHostname: pulumi.String(\"hostname\"),\n\t\t\t\tPort: pulumi.Int(1521),\n\t\t\t\tUsername: pulumi.String(\"user\"),\n\t\t\t\tPassword: pulumi.String(\"pass\"),\n\t\t\t\tDatabaseService: pulumi.String(\"ORCL\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestination, err := datastream.NewConnectionProfile(ctx, \"destination\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"BigQuery Destination\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"stream5\", \u0026datastream.StreamArgs{\n\t\t\tDisplayName: pulumi.String(\"Oracle to BigQuery\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tDesiredState: pulumi.String(\"RUNNING\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: source.ID(),\n\t\t\t\tOracleSourceConfig: \u0026datastream.StreamSourceConfigOracleSourceConfigArgs{\n\t\t\t\t\tMaxConcurrentCdcTasks: pulumi.Int(8),\n\t\t\t\t\tMaxConcurrentBackfillTasks: pulumi.Int(12),\n\t\t\t\t\tIncludeObjects: \u0026datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsArgs{\n\t\t\t\t\t\tOracleSchemas: datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tOracleTables: datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t\tOracleColumns: 
datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tExcludeObjects: \u0026datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsArgs{\n\t\t\t\t\t\tOracleSchemas: datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tOracleTables: datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t\tOracleColumns: datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tDropLargeObjects: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destination.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tDataFreshness: pulumi.String(\"900s\"),\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillAll: \u0026datastream.StreamBackfillAllArgs{\n\t\t\t\tOracleExcludedObjects: \u0026datastream.StreamBackfillAllOracleExcludedObjectsArgs{\n\t\t\t\t\tOracleSchemas: datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaArray{\n\t\t\t\t\t\t\u0026datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaArgs{\n\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\tOracleTables: datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableArray{\n\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableArgs{\n\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\tOracleColumns: datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumnArray{\n\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport 
com.pulumi.core.Output;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileOracleProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigOracleSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigOracleSourceConfigDropLargeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllOracleExcludedObjectsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var source = new ConnectionProfile(\"source\", ConnectionProfileArgs.builder()\n .displayName(\"Oracle Source\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .oracleProfile(ConnectionProfileOracleProfileArgs.builder()\n .hostname(\"hostname\")\n .port(1521)\n .username(\"user\")\n .password(\"pass\")\n .databaseService(\"ORCL\")\n .build())\n .build());\n\n var destination = new ConnectionProfile(\"destination\", ConnectionProfileArgs.builder()\n .displayName(\"BigQuery Destination\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var stream5 = new Stream(\"stream5\", StreamArgs.builder()\n .displayName(\"Oracle to BigQuery\")\n .location(\"us-central1\")\n .streamId(\"my-stream\")\n .desiredState(\"RUNNING\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(source.id())\n .oracleSourceConfig(StreamSourceConfigOracleSourceConfigArgs.builder()\n .maxConcurrentCdcTasks(8)\n .maxConcurrentBackfillTasks(12)\n .includeObjects(StreamSourceConfigOracleSourceConfigIncludeObjectsArgs.builder()\n .oracleSchemas(StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaArgs.builder()\n .schema(\"schema\")\n .oracleTables(StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableArgs.builder()\n .table(\"table\")\n .oracleColumns(StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .excludeObjects(StreamSourceConfigOracleSourceConfigExcludeObjectsArgs.builder()\n .oracleSchemas(StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaArgs.builder()\n .schema(\"schema\")\n .oracleTables(StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableArgs.builder()\n 
.table(\"table\")\n .oracleColumns(StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .dropLargeObjects()\n .build())\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destination.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .dataFreshness(\"900s\")\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .build())\n .build())\n .build())\n .build())\n .backfillAll(StreamBackfillAllArgs.builder()\n .oracleExcludedObjects(StreamBackfillAllOracleExcludedObjectsArgs.builder()\n .oracleSchemas(StreamBackfillAllOracleExcludedObjectsOracleSchemaArgs.builder()\n .schema(\"schema\")\n .oracleTables(StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableArgs.builder()\n .table(\"table\")\n .oracleColumns(StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n source:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: Oracle Source\n location: us-central1\n connectionProfileId: source-profile\n oracleProfile:\n hostname: hostname\n port: 1521\n username: user\n password: pass\n databaseService: ORCL\n destination:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: BigQuery Destination\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n stream5:\n type: gcp:datastream:Stream\n properties:\n displayName: Oracle to BigQuery\n location: us-central1\n streamId: my-stream\n desiredState: RUNNING\n sourceConfig:\n sourceConnectionProfile: ${source.id}\n oracleSourceConfig:\n maxConcurrentCdcTasks: 8\n maxConcurrentBackfillTasks: 12\n includeObjects:\n oracleSchemas:\n - schema: schema\n oracleTables:\n - table: table\n oracleColumns:\n - column: column\n excludeObjects:\n oracleSchemas:\n - schema: schema\n oracleTables:\n - table: table\n oracleColumns:\n - column: column\n dropLargeObjects: {}\n destinationConfig:\n destinationConnectionProfile: ${destination.id}\n bigqueryDestinationConfig:\n dataFreshness: 900s\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n backfillAll:\n oracleExcludedObjects:\n oracleSchemas:\n - schema: schema\n oracleTables:\n - table: table\n oracleColumns:\n - column: column\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Sql Server\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"sql-server\",\n databaseVersion: \"SQLSERVER_2019_STANDARD\",\n region: \"us-central1\",\n rootPassword: \"root-password\",\n deletionProtection: true,\n settings: {\n tier: \"db-custom-2-4096\",\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"user\",\n instance: 
instance.name,\n password: \"password\",\n});\nconst db = new gcp.sql.Database(\"db\", {\n name: \"db\",\n instance: instance.name,\n}, {\n dependsOn: [user],\n});\nconst source = new gcp.datastream.ConnectionProfile(\"source\", {\n displayName: \"SQL Server Source\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n sqlServerProfile: {\n hostname: instance.publicIpAddress,\n port: 1433,\n username: user.name,\n password: user.password,\n database: db.name,\n },\n});\nconst destination = new gcp.datastream.ConnectionProfile(\"destination\", {\n displayName: \"BigQuery Destination\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n displayName: \"SQL Server to BigQuery\",\n location: \"us-central1\",\n streamId: \"stream\",\n sourceConfig: {\n sourceConnectionProfile: source.id,\n sqlServerSourceConfig: {\n includeObjects: {\n schemas: [{\n schema: \"schema\",\n tables: [{\n table: \"table\",\n }],\n }],\n },\n },\n },\n destinationConfig: {\n destinationConnectionProfile: destination.id,\n bigqueryDestinationConfig: {\n dataFreshness: \"900s\",\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n },\n },\n },\n },\n backfillNone: {},\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"sql-server\",\n database_version=\"SQLSERVER_2019_STANDARD\",\n region=\"us-central1\",\n root_password=\"root-password\",\n deletion_protection=True,\n settings={\n \"tier\": \"db-custom-2-4096\",\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n })\nuser = gcp.sql.User(\"user\",\n name=\"user\",\n instance=instance.name,\n password=\"password\")\ndb = gcp.sql.Database(\"db\",\n name=\"db\",\n instance=instance.name,\n opts = pulumi.ResourceOptions(depends_on=[user]))\nsource = gcp.datastream.ConnectionProfile(\"source\",\n display_name=\"SQL Server Source\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n sql_server_profile={\n \"hostname\": instance.public_ip_address,\n \"port\": 1433,\n \"username\": user.name,\n \"password\": user.password,\n \"database\": db.name,\n })\ndestination = gcp.datastream.ConnectionProfile(\"destination\",\n display_name=\"BigQuery Destination\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\ndefault = gcp.datastream.Stream(\"default\",\n display_name=\"SQL Server to BigQuery\",\n location=\"us-central1\",\n stream_id=\"stream\",\n source_config={\n \"source_connection_profile\": source.id,\n \"sql_server_source_config\": {\n \"include_objects\": {\n \"schemas\": [{\n \"schema\": \"schema\",\n \"tables\": [{\n \"table\": \"table\",\n }],\n }],\n },\n },\n },\n destination_config={\n \"destination_connection_profile\": destination.id,\n \"bigquery_destination_config\": {\n \"data_freshness\": \"900s\",\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n },\n },\n },\n },\n backfill_none={})\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var instance = new 
Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"sql-server\",\n DatabaseVersion = \"SQLSERVER_2019_STANDARD\",\n Region = \"us-central1\",\n RootPassword = \"root-password\",\n DeletionProtection = true,\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-custom-2-4096\",\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"user\",\n Instance = instance.Name,\n Password = \"password\",\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Name = \"db\",\n Instance = instance.Name,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n user,\n },\n });\n\n var source = new Gcp.Datastream.ConnectionProfile(\"source\", new()\n {\n DisplayName = \"SQL Server Source\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n SqlServerProfile = new Gcp.Datastream.Inputs.ConnectionProfileSqlServerProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Port = 1433,\n Username = user.Name,\n Password = user.Password,\n Database = db.Name,\n },\n });\n\n var destination = new Gcp.Datastream.ConnectionProfile(\"destination\", new()\n {\n DisplayName = \"BigQuery Destination\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n DisplayName = \"SQL Server to BigQuery\",\n Location = \"us-central1\",\n StreamId = \"stream\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = source.Id,\n SqlServerSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigArgs\n {\n IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs\n {\n Schemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs\n {\n Schema = \"schema\",\n Tables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs\n {\n Table = \"table\",\n },\n },\n },\n },\n },\n },\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destination.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n DataFreshness = \"900s\",\n SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n },\n },\n },\n },\n BackfillNone = null,\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"sql-server\"),\n\t\t\tDatabaseVersion: pulumi.String(\"SQLSERVER_2019_STANDARD\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tRootPassword: pulumi.String(\"root-password\"),\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-custom-2-4096\"),\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tPassword: pulumi.String(\"password\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdb, err := sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tName: pulumi.String(\"db\"),\n\t\t\tInstance: instance.Name,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tuser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsource, err := datastream.NewConnectionProfile(ctx, \"source\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"SQL Server Source\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tSqlServerProfile: \u0026datastream.ConnectionProfileSqlServerProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tPort: pulumi.Int(1433),\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t\tDatabase: db.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestination, err := datastream.NewConnectionProfile(ctx, \"destination\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"BigQuery Destination\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tDisplayName: pulumi.String(\"SQL Server to BigQuery\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tStreamId: pulumi.String(\"stream\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: 
source.ID(),\n\t\t\t\tSqlServerSourceConfig: \u0026datastream.StreamSourceConfigSqlServerSourceConfigArgs{\n\t\t\t\t\tIncludeObjects: \u0026datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs{\n\t\t\t\t\t\tSchemas: datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tTables: datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destination.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tDataFreshness: pulumi.String(\"900s\"),\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillNone: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileSqlServerProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillNoneArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context 
ctx) {\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"sql-server\")\n .databaseVersion(\"SQLSERVER_2019_STANDARD\")\n .region(\"us-central1\")\n .rootPassword(\"root-password\")\n .deletionProtection(\"true\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-custom-2-4096\")\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"user\")\n .instance(instance.name())\n .password(\"password\")\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .name(\"db\")\n .instance(instance.name())\n .build(), CustomResourceOptions.builder()\n .dependsOn(user)\n .build());\n\n var source = new ConnectionProfile(\"source\", ConnectionProfileArgs.builder()\n .displayName(\"SQL Server Source\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .sqlServerProfile(ConnectionProfileSqlServerProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .port(1433)\n .username(user.name())\n .password(user.password())\n .database(db.name())\n .build())\n .build());\n\n var destination = new ConnectionProfile(\"destination\", ConnectionProfileArgs.builder()\n .displayName(\"BigQuery Destination\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .displayName(\"SQL Server to BigQuery\")\n .location(\"us-central1\")\n .streamId(\"stream\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(source.id())\n .sqlServerSourceConfig(StreamSourceConfigSqlServerSourceConfigArgs.builder()\n .includeObjects(StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs.builder()\n .schemas(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs.builder()\n .schema(\"schema\")\n .tables(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs.builder()\n .table(\"table\")\n .build())\n .build())\n .build())\n .build())\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destination.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .dataFreshness(\"900s\")\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .build())\n .build())\n .build())\n .build())\n .backfillNone()\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: sql-server\n databaseVersion: SQLSERVER_2019_STANDARD\n region: us-central1\n rootPassword: root-password\n deletionProtection: 'true'\n settings:\n tier: db-custom-2-4096\n 
ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n db:\n type: gcp:sql:Database\n properties:\n name: db\n instance: ${instance.name}\n options:\n dependson:\n - ${user}\n user:\n type: gcp:sql:User\n properties:\n name: user\n instance: ${instance.name}\n password: password\n source:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: SQL Server Source\n location: us-central1\n connectionProfileId: source-profile\n sqlServerProfile:\n hostname: ${instance.publicIpAddress}\n port: 1433\n username: ${user.name}\n password: ${user.password}\n database: ${db.name}\n destination:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: BigQuery Destination\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n default:\n type: gcp:datastream:Stream\n properties:\n displayName: SQL Server to BigQuery\n location: us-central1\n streamId: stream\n sourceConfig:\n sourceConnectionProfile: ${source.id}\n sqlServerSourceConfig:\n includeObjects:\n schemas:\n - schema: schema\n tables:\n - table: table\n destinationConfig:\n destinationConnectionProfile: ${destination.id}\n bigqueryDestinationConfig:\n dataFreshness: 900s\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n backfillNone: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Postgresql Bigquery Dataset Id\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as random from \"@pulumi/random\";\n\nconst postgres = new gcp.bigquery.Dataset(\"postgres\", {\n datasetId: \"postgres\",\n friendlyName: \"postgres\",\n description: \"Database of postgres\",\n location: \"us-central1\",\n});\nconst destinationConnectionProfile2 = new gcp.datastream.ConnectionProfile(\"destination_connection_profile2\", {\n displayName: \"Connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"dest-profile\",\n bigqueryProfile: {},\n});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"instance-name\",\n databaseVersion: \"MYSQL_8_0\",\n region: \"us-central1\",\n settings: {\n tier: \"db-f1-micro\",\n backupConfiguration: {\n enabled: true,\n binaryLogEnabled: true,\n },\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n deletionProtection: false,\n});\nconst pwd = new random.RandomPassword(\"pwd\", {\n length: 16,\n special: false,\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"my-user\",\n instance: instance.name,\n host: \"%\",\n password: pwd.result,\n});\nconst sourceConnectionProfile = new gcp.datastream.ConnectionProfile(\"source_connection_profile\", {\n displayName: \"Source connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n mysqlProfile: {\n hostname: instance.publicIpAddress,\n username: user.name,\n password: user.password,\n },\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n displayName: \"postgres to bigQuery\",\n location: \"us-central1\",\n streamId: \"postgres-bigquery\",\n sourceConfig: {\n sourceConnectionProfile: sourceConnectionProfile.id,\n mysqlSourceConfig: {},\n },\n destinationConfig: {\n destinationConnectionProfile: 
destinationConnectionProfile2.id,\n bigqueryDestinationConfig: {\n dataFreshness: \"900s\",\n singleTargetDataset: {\n datasetId: postgres.id,\n },\n },\n },\n backfillAll: {},\n});\nconst db = new gcp.sql.Database(\"db\", {\n instance: instance.name,\n name: \"db\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_random as random\n\npostgres = gcp.bigquery.Dataset(\"postgres\",\n dataset_id=\"postgres\",\n friendly_name=\"postgres\",\n description=\"Database of postgres\",\n location=\"us-central1\")\ndestination_connection_profile2 = gcp.datastream.ConnectionProfile(\"destination_connection_profile2\",\n display_name=\"Connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"dest-profile\",\n bigquery_profile={})\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"instance-name\",\n database_version=\"MYSQL_8_0\",\n region=\"us-central1\",\n settings={\n \"tier\": \"db-f1-micro\",\n \"backup_configuration\": {\n \"enabled\": True,\n \"binary_log_enabled\": True,\n },\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n },\n deletion_protection=False)\npwd = random.RandomPassword(\"pwd\",\n length=16,\n special=False)\nuser = gcp.sql.User(\"user\",\n name=\"my-user\",\n instance=instance.name,\n host=\"%\",\n password=pwd.result)\nsource_connection_profile = gcp.datastream.ConnectionProfile(\"source_connection_profile\",\n display_name=\"Source connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n mysql_profile={\n \"hostname\": instance.public_ip_address,\n \"username\": user.name,\n \"password\": user.password,\n })\ndefault = gcp.datastream.Stream(\"default\",\n display_name=\"postgres to bigQuery\",\n location=\"us-central1\",\n stream_id=\"postgres-bigquery\",\n source_config={\n \"source_connection_profile\": source_connection_profile.id,\n \"mysql_source_config\": {},\n },\n destination_config={\n \"destination_connection_profile\": destination_connection_profile2.id,\n \"bigquery_destination_config\": {\n \"data_freshness\": \"900s\",\n \"single_target_dataset\": {\n \"dataset_id\": postgres.id,\n },\n },\n },\n backfill_all={})\ndb = gcp.sql.Database(\"db\",\n instance=instance.name,\n name=\"db\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var postgres = new Gcp.BigQuery.Dataset(\"postgres\", new()\n {\n DatasetId = \"postgres\",\n FriendlyName = \"postgres\",\n Description = \"Database of postgres\",\n Location = \"us-central1\",\n });\n\n var destinationConnectionProfile2 = new Gcp.Datastream.ConnectionProfile(\"destination_connection_profile2\", new()\n {\n DisplayName = \"Connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"dest-profile\",\n BigqueryProfile = null,\n });\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"instance-name\",\n DatabaseVersion = \"MYSQL_8_0\",\n Region = \"us-central1\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n BackupConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsBackupConfigurationArgs\n {\n Enabled = true,\n BinaryLogEnabled = true,\n },\n IpConfiguration = new 
Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n DeletionProtection = false,\n });\n\n var pwd = new Random.RandomPassword(\"pwd\", new()\n {\n Length = 16,\n Special = false,\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"my-user\",\n Instance = instance.Name,\n Host = \"%\",\n Password = pwd.Result,\n });\n\n var sourceConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"source_connection_profile\", new()\n {\n DisplayName = \"Source connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n MysqlProfile = new Gcp.Datastream.Inputs.ConnectionProfileMysqlProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Username = user.Name,\n Password = user.Password,\n },\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n DisplayName = \"postgres to bigQuery\",\n Location = \"us-central1\",\n StreamId = \"postgres-bigquery\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = sourceConnectionProfile.Id,\n MysqlSourceConfig = null,\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destinationConnectionProfile2.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n DataFreshness = \"900s\",\n SingleTargetDataset = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetArgs\n {\n DatasetId = postgres.Id,\n },\n },\n },\n BackfillAll = null,\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Instance = instance.Name,\n Name = \"db\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpostgres, err := bigquery.NewDataset(ctx, \"postgres\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"postgres\"),\n\t\t\tFriendlyName: pulumi.String(\"postgres\"),\n\t\t\tDescription: pulumi.String(\"Database of postgres\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationConnectionProfile2, err := datastream.NewConnectionProfile(ctx, \"destination_connection_profile2\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"dest-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", 
\u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"instance-name\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_8_0\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-f1-micro\"),\n\t\t\t\tBackupConfiguration: \u0026sql.DatabaseInstanceSettingsBackupConfigurationArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tBinaryLogEnabled: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpwd, err := random.NewRandomPassword(ctx, \"pwd\", \u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"my-user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tHost: pulumi.String(\"%\"),\n\t\t\tPassword: pwd.Result,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsourceConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"source_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Source connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tMysqlProfile: \u0026datastream.ConnectionProfileMysqlProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tDisplayName: pulumi.String(\"postgres to bigQuery\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tStreamId: pulumi.String(\"postgres-bigquery\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: sourceConnectionProfile.ID(),\n\t\t\t\tMysqlSourceConfig: nil,\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destinationConnectionProfile2.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tDataFreshness: pulumi.String(\"900s\"),\n\t\t\t\t\tSingleTargetDataset: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetArgs{\n\t\t\t\t\t\tDatasetId: postgres.ID(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillAll: 
nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tInstance: instance.Name,\n\t\t\tName: pulumi.String(\"db\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsBackupConfigurationArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileMysqlProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var postgres = new Dataset(\"postgres\", DatasetArgs.builder()\n .datasetId(\"postgres\")\n .friendlyName(\"postgres\")\n .description(\"Database of postgres\")\n .location(\"us-central1\")\n .build());\n\n var destinationConnectionProfile2 = new ConnectionProfile(\"destinationConnectionProfile2\", ConnectionProfileArgs.builder()\n .displayName(\"Connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"dest-profile\")\n .bigqueryProfile()\n .build());\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"instance-name\")\n .databaseVersion(\"MYSQL_8_0\")\n .region(\"us-central1\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .backupConfiguration(DatabaseInstanceSettingsBackupConfigurationArgs.builder()\n .enabled(true)\n .binaryLogEnabled(true)\n .build())\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n 
.value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .deletionProtection(false)\n .build());\n\n var pwd = new RandomPassword(\"pwd\", RandomPasswordArgs.builder()\n .length(16)\n .special(false)\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"my-user\")\n .instance(instance.name())\n .host(\"%\")\n .password(pwd.result())\n .build());\n\n var sourceConnectionProfile = new ConnectionProfile(\"sourceConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Source connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .mysqlProfile(ConnectionProfileMysqlProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .username(user.name())\n .password(user.password())\n .build())\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .displayName(\"postgres to bigQuery\")\n .location(\"us-central1\")\n .streamId(\"postgres-bigquery\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(sourceConnectionProfile.id())\n .mysqlSourceConfig()\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destinationConnectionProfile2.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .dataFreshness(\"900s\")\n .singleTargetDataset(StreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetArgs.builder()\n .datasetId(postgres.id())\n .build())\n .build())\n .build())\n .backfillAll()\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .instance(instance.name())\n .name(\"db\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n postgres:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: postgres\n friendlyName: postgres\n description: Database of postgres\n location: us-central1\n default:\n type: gcp:datastream:Stream\n properties:\n displayName: postgres to bigQuery\n location: us-central1\n streamId: postgres-bigquery\n sourceConfig:\n sourceConnectionProfile: ${sourceConnectionProfile.id}\n mysqlSourceConfig: {}\n destinationConfig:\n destinationConnectionProfile: ${destinationConnectionProfile2.id}\n bigqueryDestinationConfig:\n dataFreshness: 900s\n singleTargetDataset:\n datasetId: ${postgres.id}\n backfillAll: {}\n destinationConnectionProfile2:\n type: gcp:datastream:ConnectionProfile\n name: destination_connection_profile2\n properties:\n displayName: Connection profile\n location: us-central1\n connectionProfileId: dest-profile\n bigqueryProfile: {}\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: instance-name\n databaseVersion: MYSQL_8_0\n region: us-central1\n settings:\n tier: db-f1-micro\n backupConfiguration:\n enabled: true\n binaryLogEnabled: true\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n deletionProtection: false\n db:\n type: gcp:sql:Database\n properties:\n instance: ${instance.name}\n name: db\n pwd:\n type: random:RandomPassword\n properties:\n length: 16\n special: false\n user:\n type: gcp:sql:User\n properties:\n name: my-user\n instance: ${instance.name}\n host: '%'\n password: ${pwd.result}\n sourceConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: source_connection_profile\n properties:\n displayName: Source connection profile\n location: 
us-central1\n connectionProfileId: source-profile\n mysqlProfile:\n hostname: ${instance.publicIpAddress}\n username: ${user.name}\n password: ${user.password}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Bigquery\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as random from \"@pulumi/random\";\n\nconst project = gcp.organizations.getProject({});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"my-instance\",\n databaseVersion: \"MYSQL_8_0\",\n region: \"us-central1\",\n settings: {\n tier: \"db-f1-micro\",\n backupConfiguration: {\n enabled: true,\n binaryLogEnabled: true,\n },\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n deletionProtection: true,\n});\nconst db = new gcp.sql.Database(\"db\", {\n instance: instance.name,\n name: \"db\",\n});\nconst pwd = new random.RandomPassword(\"pwd\", {\n length: 16,\n special: false,\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"user\",\n instance: instance.name,\n host: \"%\",\n password: pwd.result,\n});\nconst sourceConnectionProfile = new gcp.datastream.ConnectionProfile(\"source_connection_profile\", {\n displayName: \"Source connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n mysqlProfile: {\n hostname: instance.publicIpAddress,\n username: user.name,\n password: user.password,\n },\n});\nconst bqSa = gcp.bigquery.getDefaultServiceAccount({});\nconst bigqueryKeyUser = new gcp.kms.CryptoKeyIAMMember(\"bigquery_key_user\", {\n cryptoKeyId: \"bigquery-kms-name\",\n role: \"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n member: bqSa.then(bqSa =\u003e `serviceAccount:${bqSa.email}`),\n});\nconst destinationConnectionProfile = new gcp.datastream.ConnectionProfile(\"destination_connection_profile\", {\n displayName: \"Connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n streamId: \"my-stream\",\n location: \"us-central1\",\n displayName: \"my stream\",\n sourceConfig: {\n sourceConnectionProfile: sourceConnectionProfile.id,\n mysqlSourceConfig: {},\n },\n destinationConfig: {\n destinationConnectionProfile: destinationConnectionProfile.id,\n bigqueryDestinationConfig: {\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n kmsKeyName: \"bigquery-kms-name\",\n },\n },\n },\n },\n backfillNone: {},\n}, {\n dependsOn: [bigqueryKeyUser],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_random as random\n\nproject = gcp.organizations.get_project()\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"my-instance\",\n database_version=\"MYSQL_8_0\",\n region=\"us-central1\",\n settings={\n \"tier\": \"db-f1-micro\",\n \"backup_configuration\": {\n \"enabled\": True,\n \"binary_log_enabled\": True,\n },\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n },\n deletion_protection=True)\ndb = gcp.sql.Database(\"db\",\n instance=instance.name,\n 
name=\"db\")\npwd = random.RandomPassword(\"pwd\",\n length=16,\n special=False)\nuser = gcp.sql.User(\"user\",\n name=\"user\",\n instance=instance.name,\n host=\"%\",\n password=pwd.result)\nsource_connection_profile = gcp.datastream.ConnectionProfile(\"source_connection_profile\",\n display_name=\"Source connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n mysql_profile={\n \"hostname\": instance.public_ip_address,\n \"username\": user.name,\n \"password\": user.password,\n })\nbq_sa = gcp.bigquery.get_default_service_account()\nbigquery_key_user = gcp.kms.CryptoKeyIAMMember(\"bigquery_key_user\",\n crypto_key_id=\"bigquery-kms-name\",\n role=\"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n member=f\"serviceAccount:{bq_sa.email}\")\ndestination_connection_profile = gcp.datastream.ConnectionProfile(\"destination_connection_profile\",\n display_name=\"Connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\ndefault = gcp.datastream.Stream(\"default\",\n stream_id=\"my-stream\",\n location=\"us-central1\",\n display_name=\"my stream\",\n source_config={\n \"source_connection_profile\": source_connection_profile.id,\n \"mysql_source_config\": {},\n },\n destination_config={\n \"destination_connection_profile\": destination_connection_profile.id,\n \"bigquery_destination_config\": {\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n \"kms_key_name\": \"bigquery-kms-name\",\n },\n },\n },\n },\n backfill_none={},\n opts = pulumi.ResourceOptions(depends_on=[bigquery_key_user]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"my-instance\",\n DatabaseVersion = \"MYSQL_8_0\",\n Region = \"us-central1\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n BackupConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsBackupConfigurationArgs\n {\n Enabled = true,\n BinaryLogEnabled = true,\n },\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n DeletionProtection = true,\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Instance = instance.Name,\n Name = \"db\",\n });\n\n var pwd = new Random.RandomPassword(\"pwd\", new()\n {\n Length = 16,\n Special = false,\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"user\",\n Instance = instance.Name,\n Host = \"%\",\n Password = pwd.Result,\n });\n\n var sourceConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"source_connection_profile\", new()\n {\n DisplayName = \"Source 
connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n MysqlProfile = new Gcp.Datastream.Inputs.ConnectionProfileMysqlProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Username = user.Name,\n Password = user.Password,\n },\n });\n\n var bqSa = Gcp.BigQuery.GetDefaultServiceAccount.Invoke();\n\n var bigqueryKeyUser = new Gcp.Kms.CryptoKeyIAMMember(\"bigquery_key_user\", new()\n {\n CryptoKeyId = \"bigquery-kms-name\",\n Role = \"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n Member = $\"serviceAccount:{bqSa.Apply(getDefaultServiceAccountResult =\u003e getDefaultServiceAccountResult.Email)}\",\n });\n\n var destinationConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"destination_connection_profile\", new()\n {\n DisplayName = \"Connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n StreamId = \"my-stream\",\n Location = \"us-central1\",\n DisplayName = \"my stream\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = sourceConnectionProfile.Id,\n MysqlSourceConfig = null,\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destinationConnectionProfile.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n KmsKeyName = \"bigquery-kms-name\",\n },\n },\n },\n },\n BackfillNone = null,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n bigqueryKeyUser,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-instance\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_8_0\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-f1-micro\"),\n\t\t\t\tBackupConfiguration: \u0026sql.DatabaseInstanceSettingsBackupConfigurationArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tBinaryLogEnabled: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: 
pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tInstance: instance.Name,\n\t\t\tName: pulumi.String(\"db\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpwd, err := random.NewRandomPassword(ctx, \"pwd\", \u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tHost: pulumi.String(\"%\"),\n\t\t\tPassword: pwd.Result,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsourceConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"source_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Source connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tMysqlProfile: \u0026datastream.ConnectionProfileMysqlProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbqSa, err := bigquery.GetDefaultServiceAccount(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbigqueryKeyUser, err := kms.NewCryptoKeyIAMMember(ctx, \"bigquery_key_user\", \u0026kms.CryptoKeyIAMMemberArgs{\n\t\t\tCryptoKeyId: pulumi.String(\"bigquery-kms-name\"),\n\t\t\tRole: pulumi.String(\"roles/cloudkms.cryptoKeyEncrypterDecrypter\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:%v\", bqSa.Email),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"destination_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDisplayName: pulumi.String(\"my stream\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: sourceConnectionProfile.ID(),\n\t\t\t\tMysqlSourceConfig: nil,\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destinationConnectionProfile.ID(),\n\t\t\t\tBigqueryDestinationConfig: 
\u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t\tKmsKeyName: pulumi.String(\"bigquery-kms-name\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillNone: nil,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tbigqueryKeyUser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsBackupConfigurationArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileMysqlProfileArgs;\nimport com.pulumi.gcp.bigquery.BigqueryFunctions;\nimport com.pulumi.gcp.bigquery.inputs.GetDefaultServiceAccountArgs;\nimport com.pulumi.gcp.kms.CryptoKeyIAMMember;\nimport com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillNoneArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"my-instance\")\n .databaseVersion(\"MYSQL_8_0\")\n .region(\"us-central1\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .backupConfiguration(DatabaseInstanceSettingsBackupConfigurationArgs.builder()\n .enabled(true)\n .binaryLogEnabled(true)\n .build())\n 
.ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .deletionProtection(true)\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .instance(instance.name())\n .name(\"db\")\n .build());\n\n var pwd = new RandomPassword(\"pwd\", RandomPasswordArgs.builder()\n .length(16)\n .special(false)\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"user\")\n .instance(instance.name())\n .host(\"%\")\n .password(pwd.result())\n .build());\n\n var sourceConnectionProfile = new ConnectionProfile(\"sourceConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Source connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .mysqlProfile(ConnectionProfileMysqlProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .username(user.name())\n .password(user.password())\n .build())\n .build());\n\n final var bqSa = BigqueryFunctions.getDefaultServiceAccount();\n\n var bigqueryKeyUser = new CryptoKeyIAMMember(\"bigqueryKeyUser\", CryptoKeyIAMMemberArgs.builder()\n .cryptoKeyId(\"bigquery-kms-name\")\n .role(\"roles/cloudkms.cryptoKeyEncrypterDecrypter\")\n .member(String.format(\"serviceAccount:%s\", bqSa.applyValue(getDefaultServiceAccountResult -\u003e getDefaultServiceAccountResult.email())))\n .build());\n\n var destinationConnectionProfile = new ConnectionProfile(\"destinationConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .streamId(\"my-stream\")\n .location(\"us-central1\")\n .displayName(\"my stream\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(sourceConnectionProfile.id())\n .mysqlSourceConfig()\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destinationConnectionProfile.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .kmsKeyName(\"bigquery-kms-name\")\n .build())\n .build())\n .build())\n .build())\n .backfillNone()\n .build(), CustomResourceOptions.builder()\n .dependsOn(bigqueryKeyUser)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-instance\n databaseVersion: MYSQL_8_0\n region: us-central1\n settings:\n tier: db-f1-micro\n backupConfiguration:\n enabled: true\n binaryLogEnabled: true\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - 
value: 34.67.234.134\n - value: 34.72.239.218\n deletionProtection: true\n db:\n type: gcp:sql:Database\n properties:\n instance: ${instance.name}\n name: db\n pwd:\n type: random:RandomPassword\n properties:\n length: 16\n special: false\n user:\n type: gcp:sql:User\n properties:\n name: user\n instance: ${instance.name}\n host: '%'\n password: ${pwd.result}\n sourceConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: source_connection_profile\n properties:\n displayName: Source connection profile\n location: us-central1\n connectionProfileId: source-profile\n mysqlProfile:\n hostname: ${instance.publicIpAddress}\n username: ${user.name}\n password: ${user.password}\n bigqueryKeyUser:\n type: gcp:kms:CryptoKeyIAMMember\n name: bigquery_key_user\n properties:\n cryptoKeyId: bigquery-kms-name\n role: roles/cloudkms.cryptoKeyEncrypterDecrypter\n member: serviceAccount:${bqSa.email}\n destinationConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: destination_connection_profile\n properties:\n displayName: Connection profile\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n default:\n type: gcp:datastream:Stream\n properties:\n streamId: my-stream\n location: us-central1\n displayName: my stream\n sourceConfig:\n sourceConnectionProfile: ${sourceConnectionProfile.id}\n mysqlSourceConfig: {}\n destinationConfig:\n destinationConnectionProfile: ${destinationConnectionProfile.id}\n bigqueryDestinationConfig:\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n kmsKeyName: bigquery-kms-name\n backfillNone: {}\n options:\n dependson:\n - ${bigqueryKeyUser}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n bqSa:\n fn::invoke:\n Function: gcp:bigquery:getDefaultServiceAccount\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Bigquery Append Only\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as random from \"@pulumi/random\";\n\nconst project = gcp.organizations.getProject({});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"my-instance\",\n databaseVersion: \"MYSQL_8_0\",\n region: \"us-central1\",\n settings: {\n tier: \"db-f1-micro\",\n backupConfiguration: {\n enabled: true,\n binaryLogEnabled: true,\n },\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n deletionProtection: true,\n});\nconst db = new gcp.sql.Database(\"db\", {\n instance: instance.name,\n name: \"db\",\n});\nconst pwd = new random.RandomPassword(\"pwd\", {\n length: 16,\n special: false,\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"user\",\n instance: instance.name,\n host: \"%\",\n password: pwd.result,\n});\nconst sourceConnectionProfile = new gcp.datastream.ConnectionProfile(\"source_connection_profile\", {\n displayName: \"Source connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n mysqlProfile: {\n hostname: instance.publicIpAddress,\n username: user.name,\n password: user.password,\n },\n});\nconst destinationConnectionProfile = new gcp.datastream.ConnectionProfile(\"destination_connection_profile\", {\n displayName: \"Connection profile\",\n location: \"us-central1\",\n 
connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n streamId: \"my-stream\",\n location: \"us-central1\",\n displayName: \"my stream\",\n sourceConfig: {\n sourceConnectionProfile: sourceConnectionProfile.id,\n mysqlSourceConfig: {},\n },\n destinationConfig: {\n destinationConnectionProfile: destinationConnectionProfile.id,\n bigqueryDestinationConfig: {\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n },\n },\n appendOnly: {},\n },\n },\n backfillNone: {},\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_random as random\n\nproject = gcp.organizations.get_project()\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"my-instance\",\n database_version=\"MYSQL_8_0\",\n region=\"us-central1\",\n settings={\n \"tier\": \"db-f1-micro\",\n \"backup_configuration\": {\n \"enabled\": True,\n \"binary_log_enabled\": True,\n },\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n },\n deletion_protection=True)\ndb = gcp.sql.Database(\"db\",\n instance=instance.name,\n name=\"db\")\npwd = random.RandomPassword(\"pwd\",\n length=16,\n special=False)\nuser = gcp.sql.User(\"user\",\n name=\"user\",\n instance=instance.name,\n host=\"%\",\n password=pwd.result)\nsource_connection_profile = gcp.datastream.ConnectionProfile(\"source_connection_profile\",\n display_name=\"Source connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n mysql_profile={\n \"hostname\": instance.public_ip_address,\n \"username\": user.name,\n \"password\": user.password,\n })\ndestination_connection_profile = gcp.datastream.ConnectionProfile(\"destination_connection_profile\",\n display_name=\"Connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\ndefault = gcp.datastream.Stream(\"default\",\n stream_id=\"my-stream\",\n location=\"us-central1\",\n display_name=\"my stream\",\n source_config={\n \"source_connection_profile\": source_connection_profile.id,\n \"mysql_source_config\": {},\n },\n destination_config={\n \"destination_connection_profile\": destination_connection_profile.id,\n \"bigquery_destination_config\": {\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n },\n },\n \"append_only\": {},\n },\n },\n backfill_none={})\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"my-instance\",\n DatabaseVersion = \"MYSQL_8_0\",\n Region = \"us-central1\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n BackupConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsBackupConfigurationArgs\n {\n Enabled = true,\n BinaryLogEnabled = true,\n },\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new 
Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n DeletionProtection = true,\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Instance = instance.Name,\n Name = \"db\",\n });\n\n var pwd = new Random.RandomPassword(\"pwd\", new()\n {\n Length = 16,\n Special = false,\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"user\",\n Instance = instance.Name,\n Host = \"%\",\n Password = pwd.Result,\n });\n\n var sourceConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"source_connection_profile\", new()\n {\n DisplayName = \"Source connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n MysqlProfile = new Gcp.Datastream.Inputs.ConnectionProfileMysqlProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Username = user.Name,\n Password = user.Password,\n },\n });\n\n var destinationConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"destination_connection_profile\", new()\n {\n DisplayName = \"Connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n StreamId = \"my-stream\",\n Location = \"us-central1\",\n DisplayName = \"my stream\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = sourceConnectionProfile.Id,\n MysqlSourceConfig = null,\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destinationConnectionProfile.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n },\n },\n AppendOnly = null,\n },\n },\n BackfillNone = null,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-instance\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_8_0\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-f1-micro\"),\n\t\t\t\tBackupConfiguration: \u0026sql.DatabaseInstanceSettingsBackupConfigurationArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tBinaryLogEnabled: 
pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tInstance: instance.Name,\n\t\t\tName: pulumi.String(\"db\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpwd, err := random.NewRandomPassword(ctx, \"pwd\", \u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tHost: pulumi.String(\"%\"),\n\t\t\tPassword: pwd.Result,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsourceConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"source_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Source connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tMysqlProfile: \u0026datastream.ConnectionProfileMysqlProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"destination_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDisplayName: pulumi.String(\"my stream\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: sourceConnectionProfile.ID(),\n\t\t\t\tMysqlSourceConfig: nil,\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destinationConnectionProfile.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tSourceHierarchyDatasets: 
\u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tAppendOnly: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillNone: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsBackupConfigurationArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileMysqlProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigAppendOnlyArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillNoneArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"my-instance\")\n .databaseVersion(\"MYSQL_8_0\")\n .region(\"us-central1\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .backupConfiguration(DatabaseInstanceSettingsBackupConfigurationArgs.builder()\n .enabled(true)\n .binaryLogEnabled(true)\n .build())\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n 
DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .deletionProtection(true)\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .instance(instance.name())\n .name(\"db\")\n .build());\n\n var pwd = new RandomPassword(\"pwd\", RandomPasswordArgs.builder()\n .length(16)\n .special(false)\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"user\")\n .instance(instance.name())\n .host(\"%\")\n .password(pwd.result())\n .build());\n\n var sourceConnectionProfile = new ConnectionProfile(\"sourceConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Source connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .mysqlProfile(ConnectionProfileMysqlProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .username(user.name())\n .password(user.password())\n .build())\n .build());\n\n var destinationConnectionProfile = new ConnectionProfile(\"destinationConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .streamId(\"my-stream\")\n .location(\"us-central1\")\n .displayName(\"my stream\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(sourceConnectionProfile.id())\n .mysqlSourceConfig()\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destinationConnectionProfile.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .build())\n .build())\n .appendOnly()\n .build())\n .build())\n .backfillNone()\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-instance\n databaseVersion: MYSQL_8_0\n region: us-central1\n settings:\n tier: db-f1-micro\n backupConfiguration:\n enabled: true\n binaryLogEnabled: true\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n deletionProtection: true\n db:\n type: gcp:sql:Database\n properties:\n instance: ${instance.name}\n name: db\n pwd:\n type: random:RandomPassword\n properties:\n length: 16\n special: false\n user:\n type: gcp:sql:User\n properties:\n name: user\n instance: ${instance.name}\n host: '%'\n password: ${pwd.result}\n sourceConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: source_connection_profile\n properties:\n displayName: Source connection profile\n location: us-central1\n connectionProfileId: source-profile\n mysqlProfile:\n hostname: ${instance.publicIpAddress}\n username: ${user.name}\n password: ${user.password}\n destinationConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: destination_connection_profile\n properties:\n displayName: Connection profile\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n 
default:\n type: gcp:datastream:Stream\n properties:\n streamId: my-stream\n location: us-central1\n displayName: my stream\n sourceConfig:\n sourceConnectionProfile: ${sourceConnectionProfile.id}\n mysqlSourceConfig: {}\n destinationConfig:\n destinationConnectionProfile: ${destinationConnectionProfile.id}\n bigqueryDestinationConfig:\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n appendOnly: {}\n backfillNone: {}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nStream can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/streams/{{stream_id}}`\n\n* `{{project}}/{{location}}/{{stream_id}}`\n\n* `{{location}}/{{stream_id}}`\n\nWhen using the `pulumi import` command, Stream can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:datastream/stream:Stream default projects/{{project}}/locations/{{location}}/streams/{{stream_id}}\n```\n\n```sh\n$ pulumi import gcp:datastream/stream:Stream default {{project}}/{{location}}/{{stream_id}}\n```\n\n```sh\n$ pulumi import gcp:datastream/stream:Stream default {{location}}/{{stream_id}}\n```\n\n", + "description": "A resource representing streaming data from a source to a destination.\n\n\nTo get more information about Stream, see:\n\n* [API documentation](https://cloud.google.com/datastream/docs/reference/rest/v1/projects.locations.streams)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/datastream/docs/create-a-stream)\n\n## Example Usage\n\n### Datastream Stream Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as random from \"@pulumi/random\";\n\nconst project = gcp.organizations.getProject({});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"my-instance\",\n databaseVersion: \"MYSQL_8_0\",\n region: \"us-central1\",\n settings: {\n tier: \"db-f1-micro\",\n backupConfiguration: {\n enabled: true,\n binaryLogEnabled: true,\n },\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n deletionProtection: true,\n});\nconst db = new gcp.sql.Database(\"db\", {\n instance: instance.name,\n name: \"db\",\n});\nconst pwd = new random.RandomPassword(\"pwd\", {\n length: 16,\n special: false,\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"user\",\n instance: instance.name,\n host: \"%\",\n password: pwd.result,\n});\nconst sourceConnectionProfile = new gcp.datastream.ConnectionProfile(\"source_connection_profile\", {\n displayName: \"Source connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n mysqlProfile: {\n hostname: instance.publicIpAddress,\n username: user.name,\n password: user.password,\n },\n});\nconst bucket = new gcp.storage.Bucket(\"bucket\", {\n name: \"my-bucket\",\n location: \"US\",\n uniformBucketLevelAccess: true,\n});\nconst viewer = new gcp.storage.BucketIAMMember(\"viewer\", {\n bucket: bucket.name,\n role: \"roles/storage.objectViewer\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com`),\n});\nconst creator = new gcp.storage.BucketIAMMember(\"creator\", {\n 
bucket: bucket.name,\n role: \"roles/storage.objectCreator\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com`),\n});\nconst reader = new gcp.storage.BucketIAMMember(\"reader\", {\n bucket: bucket.name,\n role: \"roles/storage.legacyBucketReader\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com`),\n});\nconst keyUser = new gcp.kms.CryptoKeyIAMMember(\"key_user\", {\n cryptoKeyId: \"kms-name\",\n role: \"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com`),\n});\nconst destinationConnectionProfile = new gcp.datastream.ConnectionProfile(\"destination_connection_profile\", {\n displayName: \"Connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n gcsProfile: {\n bucket: bucket.name,\n rootPath: \"/path\",\n },\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n streamId: \"my-stream\",\n desiredState: \"NOT_STARTED\",\n location: \"us-central1\",\n displayName: \"my stream\",\n labels: {\n key: \"value\",\n },\n sourceConfig: {\n sourceConnectionProfile: sourceConnectionProfile.id,\n mysqlSourceConfig: {\n includeObjects: {\n mysqlDatabases: [{\n database: \"my-database\",\n mysqlTables: [\n {\n table: \"includedTable\",\n mysqlColumns: [{\n column: \"includedColumn\",\n dataType: \"VARCHAR\",\n collation: \"utf8mb4\",\n primaryKey: false,\n nullable: false,\n ordinalPosition: 0,\n }],\n },\n {\n table: \"includedTable_2\",\n },\n ],\n }],\n },\n excludeObjects: {\n mysqlDatabases: [{\n database: \"my-database\",\n mysqlTables: [{\n table: \"excludedTable\",\n mysqlColumns: [{\n column: \"excludedColumn\",\n dataType: \"VARCHAR\",\n collation: \"utf8mb4\",\n primaryKey: false,\n nullable: false,\n ordinalPosition: 0,\n }],\n }],\n }],\n },\n maxConcurrentCdcTasks: 5,\n },\n },\n destinationConfig: {\n destinationConnectionProfile: destinationConnectionProfile.id,\n gcsDestinationConfig: {\n path: \"mydata\",\n fileRotationMb: 200,\n fileRotationInterval: \"60s\",\n jsonFileFormat: {\n schemaFileFormat: \"NO_SCHEMA_FILE\",\n compression: \"GZIP\",\n },\n },\n },\n backfillAll: {\n mysqlExcludedObjects: {\n mysqlDatabases: [{\n database: \"my-database\",\n mysqlTables: [{\n table: \"excludedTable\",\n mysqlColumns: [{\n column: \"excludedColumn\",\n dataType: \"VARCHAR\",\n collation: \"utf8mb4\",\n primaryKey: false,\n nullable: false,\n ordinalPosition: 0,\n }],\n }],\n }],\n },\n },\n customerManagedEncryptionKey: \"kms-name\",\n}, {\n dependsOn: [keyUser],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_random as random\n\nproject = gcp.organizations.get_project()\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"my-instance\",\n database_version=\"MYSQL_8_0\",\n region=\"us-central1\",\n settings={\n \"tier\": \"db-f1-micro\",\n \"backup_configuration\": {\n \"enabled\": True,\n \"binary_log_enabled\": True,\n },\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n },\n deletion_protection=True)\ndb = gcp.sql.Database(\"db\",\n instance=instance.name,\n name=\"db\")\npwd = random.RandomPassword(\"pwd\",\n length=16,\n 
special=False)\nuser = gcp.sql.User(\"user\",\n name=\"user\",\n instance=instance.name,\n host=\"%\",\n password=pwd.result)\nsource_connection_profile = gcp.datastream.ConnectionProfile(\"source_connection_profile\",\n display_name=\"Source connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n mysql_profile={\n \"hostname\": instance.public_ip_address,\n \"username\": user.name,\n \"password\": user.password,\n })\nbucket = gcp.storage.Bucket(\"bucket\",\n name=\"my-bucket\",\n location=\"US\",\n uniform_bucket_level_access=True)\nviewer = gcp.storage.BucketIAMMember(\"viewer\",\n bucket=bucket.name,\n role=\"roles/storage.objectViewer\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-datastream.iam.gserviceaccount.com\")\ncreator = gcp.storage.BucketIAMMember(\"creator\",\n bucket=bucket.name,\n role=\"roles/storage.objectCreator\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-datastream.iam.gserviceaccount.com\")\nreader = gcp.storage.BucketIAMMember(\"reader\",\n bucket=bucket.name,\n role=\"roles/storage.legacyBucketReader\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-datastream.iam.gserviceaccount.com\")\nkey_user = gcp.kms.CryptoKeyIAMMember(\"key_user\",\n crypto_key_id=\"kms-name\",\n role=\"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-datastream.iam.gserviceaccount.com\")\ndestination_connection_profile = gcp.datastream.ConnectionProfile(\"destination_connection_profile\",\n display_name=\"Connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n gcs_profile={\n \"bucket\": bucket.name,\n \"root_path\": \"/path\",\n })\ndefault = gcp.datastream.Stream(\"default\",\n stream_id=\"my-stream\",\n desired_state=\"NOT_STARTED\",\n location=\"us-central1\",\n display_name=\"my stream\",\n labels={\n \"key\": \"value\",\n },\n source_config={\n \"source_connection_profile\": source_connection_profile.id,\n \"mysql_source_config\": {\n \"include_objects\": {\n \"mysql_databases\": [{\n \"database\": \"my-database\",\n \"mysql_tables\": [\n {\n \"table\": \"includedTable\",\n \"mysql_columns\": [{\n \"column\": \"includedColumn\",\n \"data_type\": \"VARCHAR\",\n \"collation\": \"utf8mb4\",\n \"primary_key\": False,\n \"nullable\": False,\n \"ordinal_position\": 0,\n }],\n },\n {\n \"table\": \"includedTable_2\",\n },\n ],\n }],\n },\n \"exclude_objects\": {\n \"mysql_databases\": [{\n \"database\": \"my-database\",\n \"mysql_tables\": [{\n \"table\": \"excludedTable\",\n \"mysql_columns\": [{\n \"column\": \"excludedColumn\",\n \"data_type\": \"VARCHAR\",\n \"collation\": \"utf8mb4\",\n \"primary_key\": False,\n \"nullable\": False,\n \"ordinal_position\": 0,\n }],\n }],\n }],\n },\n \"max_concurrent_cdc_tasks\": 5,\n },\n },\n destination_config={\n \"destination_connection_profile\": destination_connection_profile.id,\n \"gcs_destination_config\": {\n \"path\": \"mydata\",\n \"file_rotation_mb\": 200,\n \"file_rotation_interval\": \"60s\",\n \"json_file_format\": {\n \"schema_file_format\": \"NO_SCHEMA_FILE\",\n \"compression\": \"GZIP\",\n },\n },\n },\n backfill_all={\n \"mysql_excluded_objects\": {\n \"mysql_databases\": [{\n \"database\": \"my-database\",\n \"mysql_tables\": [{\n \"table\": \"excludedTable\",\n \"mysql_columns\": [{\n \"column\": \"excludedColumn\",\n \"data_type\": \"VARCHAR\",\n \"collation\": \"utf8mb4\",\n \"primary_key\": False,\n \"nullable\": False,\n \"ordinal_position\": 
0,\n }],\n }],\n }],\n },\n },\n customer_managed_encryption_key=\"kms-name\",\n opts = pulumi.ResourceOptions(depends_on=[key_user]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"my-instance\",\n DatabaseVersion = \"MYSQL_8_0\",\n Region = \"us-central1\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n BackupConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsBackupConfigurationArgs\n {\n Enabled = true,\n BinaryLogEnabled = true,\n },\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n DeletionProtection = true,\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Instance = instance.Name,\n Name = \"db\",\n });\n\n var pwd = new Random.RandomPassword(\"pwd\", new()\n {\n Length = 16,\n Special = false,\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"user\",\n Instance = instance.Name,\n Host = \"%\",\n Password = pwd.Result,\n });\n\n var sourceConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"source_connection_profile\", new()\n {\n DisplayName = \"Source connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n MysqlProfile = new Gcp.Datastream.Inputs.ConnectionProfileMysqlProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Username = user.Name,\n Password = user.Password,\n },\n });\n\n var bucket = new Gcp.Storage.Bucket(\"bucket\", new()\n {\n Name = \"my-bucket\",\n Location = \"US\",\n UniformBucketLevelAccess = true,\n });\n\n var viewer = new Gcp.Storage.BucketIAMMember(\"viewer\", new()\n {\n Bucket = bucket.Name,\n Role = \"roles/storage.objectViewer\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-datastream.iam.gserviceaccount.com\",\n });\n\n var creator = new Gcp.Storage.BucketIAMMember(\"creator\", new()\n {\n Bucket = bucket.Name,\n Role = \"roles/storage.objectCreator\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-datastream.iam.gserviceaccount.com\",\n });\n\n var reader = new Gcp.Storage.BucketIAMMember(\"reader\", new()\n {\n Bucket = bucket.Name,\n Role = \"roles/storage.legacyBucketReader\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-datastream.iam.gserviceaccount.com\",\n });\n\n var keyUser = new Gcp.Kms.CryptoKeyIAMMember(\"key_user\", new()\n {\n CryptoKeyId = \"kms-name\",\n Role = \"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n Member = 
$\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-datastream.iam.gserviceaccount.com\",\n });\n\n var destinationConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"destination_connection_profile\", new()\n {\n DisplayName = \"Connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n GcsProfile = new Gcp.Datastream.Inputs.ConnectionProfileGcsProfileArgs\n {\n Bucket = bucket.Name,\n RootPath = \"/path\",\n },\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n StreamId = \"my-stream\",\n DesiredState = \"NOT_STARTED\",\n Location = \"us-central1\",\n DisplayName = \"my stream\",\n Labels = \n {\n { \"key\", \"value\" },\n },\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = sourceConnectionProfile.Id,\n MysqlSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigArgs\n {\n IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsArgs\n {\n MysqlDatabases = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseArgs\n {\n Database = \"my-database\",\n MysqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs\n {\n Table = \"includedTable\",\n MysqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs\n {\n Column = \"includedColumn\",\n DataType = \"VARCHAR\",\n Collation = \"utf8mb4\",\n PrimaryKey = false,\n Nullable = false,\n OrdinalPosition = 0,\n },\n },\n },\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs\n {\n Table = \"includedTable_2\",\n },\n },\n },\n },\n },\n ExcludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsArgs\n {\n MysqlDatabases = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseArgs\n {\n Database = \"my-database\",\n MysqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableArgs\n {\n Table = \"excludedTable\",\n MysqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs\n {\n Column = \"excludedColumn\",\n DataType = \"VARCHAR\",\n Collation = \"utf8mb4\",\n PrimaryKey = false,\n Nullable = false,\n OrdinalPosition = 0,\n },\n },\n },\n },\n },\n },\n },\n MaxConcurrentCdcTasks = 5,\n },\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destinationConnectionProfile.Id,\n GcsDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigGcsDestinationConfigArgs\n {\n Path = \"mydata\",\n FileRotationMb = 200,\n FileRotationInterval = \"60s\",\n JsonFileFormat = new Gcp.Datastream.Inputs.StreamDestinationConfigGcsDestinationConfigJsonFileFormatArgs\n {\n SchemaFileFormat = \"NO_SCHEMA_FILE\",\n Compression = \"GZIP\",\n },\n },\n },\n BackfillAll = new Gcp.Datastream.Inputs.StreamBackfillAllArgs\n {\n MysqlExcludedObjects = new Gcp.Datastream.Inputs.StreamBackfillAllMysqlExcludedObjectsArgs\n {\n MysqlDatabases = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseArgs\n {\n Database = \"my-database\",\n MysqlTables = new[]\n {\n new 
Gcp.Datastream.Inputs.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableArgs\n {\n Table = \"excludedTable\",\n MysqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumnArgs\n {\n Column = \"excludedColumn\",\n DataType = \"VARCHAR\",\n Collation = \"utf8mb4\",\n PrimaryKey = false,\n Nullable = false,\n OrdinalPosition = 0,\n },\n },\n },\n },\n },\n },\n },\n },\n CustomerManagedEncryptionKey = \"kms-name\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n keyUser,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-instance\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_8_0\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-f1-micro\"),\n\t\t\t\tBackupConfiguration: \u0026sql.DatabaseInstanceSettingsBackupConfigurationArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tBinaryLogEnabled: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tInstance: instance.Name,\n\t\t\tName: pulumi.String(\"db\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpwd, err := random.NewRandomPassword(ctx, \"pwd\", \u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tHost: pulumi.String(\"%\"),\n\t\t\tPassword: pwd.Result,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsourceConnectionProfile, err := datastream.NewConnectionProfile(ctx, 
\"source_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Source connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tMysqlProfile: \u0026datastream.ConnectionProfileMysqlProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbucket, err := storage.NewBucket(ctx, \"bucket\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"my-bucket\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tUniformBucketLevelAccess: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = storage.NewBucketIAMMember(ctx, \"viewer\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: bucket.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.objectViewer\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-datastream.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = storage.NewBucketIAMMember(ctx, \"creator\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: bucket.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.objectCreator\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-datastream.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = storage.NewBucketIAMMember(ctx, \"reader\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: bucket.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.legacyBucketReader\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-datastream.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkeyUser, err := kms.NewCryptoKeyIAMMember(ctx, \"key_user\", \u0026kms.CryptoKeyIAMMemberArgs{\n\t\t\tCryptoKeyId: pulumi.String(\"kms-name\"),\n\t\t\tRole: pulumi.String(\"roles/cloudkms.cryptoKeyEncrypterDecrypter\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-datastream.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"destination_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tGcsProfile: \u0026datastream.ConnectionProfileGcsProfileArgs{\n\t\t\t\tBucket: bucket.Name,\n\t\t\t\tRootPath: pulumi.String(\"/path\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tDesiredState: pulumi.String(\"NOT_STARTED\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDisplayName: pulumi.String(\"my stream\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"key\": pulumi.String(\"value\"),\n\t\t\t},\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: sourceConnectionProfile.ID(),\n\t\t\t\tMysqlSourceConfig: \u0026datastream.StreamSourceConfigMysqlSourceConfigArgs{\n\t\t\t\t\tIncludeObjects: \u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsArgs{\n\t\t\t\t\t\tMysqlDatabases: 
datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseArgs{\n\t\t\t\t\t\t\t\tDatabase: pulumi.String(\"my-database\"),\n\t\t\t\t\t\t\t\tMysqlTables: datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"includedTable\"),\n\t\t\t\t\t\t\t\t\t\tMysqlColumns: datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"includedColumn\"),\n\t\t\t\t\t\t\t\t\t\t\t\tDataType: pulumi.String(\"VARCHAR\"),\n\t\t\t\t\t\t\t\t\t\t\t\tCollation: pulumi.String(\"utf8mb4\"),\n\t\t\t\t\t\t\t\t\t\t\t\tPrimaryKey: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\t\tNullable: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\t\tOrdinalPosition: pulumi.Int(0),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"includedTable_2\"),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tExcludeObjects: \u0026datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsArgs{\n\t\t\t\t\t\tMysqlDatabases: datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseArgs{\n\t\t\t\t\t\t\t\tDatabase: pulumi.String(\"my-database\"),\n\t\t\t\t\t\t\t\tMysqlTables: datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"excludedTable\"),\n\t\t\t\t\t\t\t\t\t\tMysqlColumns: datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"excludedColumn\"),\n\t\t\t\t\t\t\t\t\t\t\t\tDataType: pulumi.String(\"VARCHAR\"),\n\t\t\t\t\t\t\t\t\t\t\t\tCollation: pulumi.String(\"utf8mb4\"),\n\t\t\t\t\t\t\t\t\t\t\t\tPrimaryKey: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\t\tNullable: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\t\tOrdinalPosition: pulumi.Int(0),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tMaxConcurrentCdcTasks: pulumi.Int(5),\n\t\t\t\t},\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destinationConnectionProfile.ID(),\n\t\t\t\tGcsDestinationConfig: \u0026datastream.StreamDestinationConfigGcsDestinationConfigArgs{\n\t\t\t\t\tPath: pulumi.String(\"mydata\"),\n\t\t\t\t\tFileRotationMb: pulumi.Int(200),\n\t\t\t\t\tFileRotationInterval: pulumi.String(\"60s\"),\n\t\t\t\t\tJsonFileFormat: \u0026datastream.StreamDestinationConfigGcsDestinationConfigJsonFileFormatArgs{\n\t\t\t\t\t\tSchemaFileFormat: 
pulumi.String(\"NO_SCHEMA_FILE\"),\n\t\t\t\t\t\tCompression: pulumi.String(\"GZIP\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillAll: \u0026datastream.StreamBackfillAllArgs{\n\t\t\t\tMysqlExcludedObjects: \u0026datastream.StreamBackfillAllMysqlExcludedObjectsArgs{\n\t\t\t\t\tMysqlDatabases: datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseArray{\n\t\t\t\t\t\t\u0026datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseArgs{\n\t\t\t\t\t\t\tDatabase: pulumi.String(\"my-database\"),\n\t\t\t\t\t\t\tMysqlTables: datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableArray{\n\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableArgs{\n\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"excludedTable\"),\n\t\t\t\t\t\t\t\t\tMysqlColumns: datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"excludedColumn\"),\n\t\t\t\t\t\t\t\t\t\t\tDataType: pulumi.String(\"VARCHAR\"),\n\t\t\t\t\t\t\t\t\t\t\tCollation: pulumi.String(\"utf8mb4\"),\n\t\t\t\t\t\t\t\t\t\t\tPrimaryKey: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\tNullable: pulumi.Bool(false),\n\t\t\t\t\t\t\t\t\t\t\tOrdinalPosition: pulumi.Int(0),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tCustomerManagedEncryptionKey: pulumi.String(\"kms-name\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tkeyUser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsBackupConfigurationArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileMysqlProfileArgs;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport com.pulumi.gcp.storage.BucketIAMMember;\nimport com.pulumi.gcp.storage.BucketIAMMemberArgs;\nimport com.pulumi.gcp.kms.CryptoKeyIAMMember;\nimport com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileGcsProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigIncludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigExcludeObjectsArgs;\nimport 
com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigGcsDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigGcsDestinationConfigJsonFileFormatArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllMysqlExcludedObjectsArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"my-instance\")\n .databaseVersion(\"MYSQL_8_0\")\n .region(\"us-central1\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .backupConfiguration(DatabaseInstanceSettingsBackupConfigurationArgs.builder()\n .enabled(true)\n .binaryLogEnabled(true)\n .build())\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .deletionProtection(true)\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .instance(instance.name())\n .name(\"db\")\n .build());\n\n var pwd = new RandomPassword(\"pwd\", RandomPasswordArgs.builder()\n .length(16)\n .special(false)\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"user\")\n .instance(instance.name())\n .host(\"%\")\n .password(pwd.result())\n .build());\n\n var sourceConnectionProfile = new ConnectionProfile(\"sourceConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Source connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .mysqlProfile(ConnectionProfileMysqlProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .username(user.name())\n .password(user.password())\n .build())\n .build());\n\n var bucket = new Bucket(\"bucket\", BucketArgs.builder()\n .name(\"my-bucket\")\n .location(\"US\")\n .uniformBucketLevelAccess(true)\n .build());\n\n var viewer = new BucketIAMMember(\"viewer\", BucketIAMMemberArgs.builder()\n .bucket(bucket.name())\n .role(\"roles/storage.objectViewer\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-datastream.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var creator = new BucketIAMMember(\"creator\", BucketIAMMemberArgs.builder()\n .bucket(bucket.name())\n .role(\"roles/storage.objectCreator\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-datastream.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var reader = new 
BucketIAMMember(\"reader\", BucketIAMMemberArgs.builder()\n .bucket(bucket.name())\n .role(\"roles/storage.legacyBucketReader\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-datastream.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var keyUser = new CryptoKeyIAMMember(\"keyUser\", CryptoKeyIAMMemberArgs.builder()\n .cryptoKeyId(\"kms-name\")\n .role(\"roles/cloudkms.cryptoKeyEncrypterDecrypter\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-datastream.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var destinationConnectionProfile = new ConnectionProfile(\"destinationConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .gcsProfile(ConnectionProfileGcsProfileArgs.builder()\n .bucket(bucket.name())\n .rootPath(\"/path\")\n .build())\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .streamId(\"my-stream\")\n .desiredState(\"NOT_STARTED\")\n .location(\"us-central1\")\n .displayName(\"my stream\")\n .labels(Map.of(\"key\", \"value\"))\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(sourceConnectionProfile.id())\n .mysqlSourceConfig(StreamSourceConfigMysqlSourceConfigArgs.builder()\n .includeObjects(StreamSourceConfigMysqlSourceConfigIncludeObjectsArgs.builder()\n .mysqlDatabases(StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseArgs.builder()\n .database(\"my-database\")\n .mysqlTables( \n StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs.builder()\n .table(\"includedTable\")\n .mysqlColumns(StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs.builder()\n .column(\"includedColumn\")\n .dataType(\"VARCHAR\")\n .collation(\"utf8mb4\")\n .primaryKey(false)\n .nullable(false)\n .ordinalPosition(0)\n .build())\n .build(),\n StreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabaseMysqlTableArgs.builder()\n .table(\"includedTable_2\")\n .build())\n .build())\n .build())\n .excludeObjects(StreamSourceConfigMysqlSourceConfigExcludeObjectsArgs.builder()\n .mysqlDatabases(StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseArgs.builder()\n .database(\"my-database\")\n .mysqlTables(StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableArgs.builder()\n .table(\"excludedTable\")\n .mysqlColumns(StreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabaseMysqlTableMysqlColumnArgs.builder()\n .column(\"excludedColumn\")\n .dataType(\"VARCHAR\")\n .collation(\"utf8mb4\")\n .primaryKey(false)\n .nullable(false)\n .ordinalPosition(0)\n .build())\n .build())\n .build())\n .build())\n .maxConcurrentCdcTasks(5)\n .build())\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destinationConnectionProfile.id())\n .gcsDestinationConfig(StreamDestinationConfigGcsDestinationConfigArgs.builder()\n .path(\"mydata\")\n .fileRotationMb(200)\n .fileRotationInterval(\"60s\")\n .jsonFileFormat(StreamDestinationConfigGcsDestinationConfigJsonFileFormatArgs.builder()\n .schemaFileFormat(\"NO_SCHEMA_FILE\")\n .compression(\"GZIP\")\n .build())\n .build())\n .build())\n .backfillAll(StreamBackfillAllArgs.builder()\n .mysqlExcludedObjects(StreamBackfillAllMysqlExcludedObjectsArgs.builder()\n 
.mysqlDatabases(StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseArgs.builder()\n .database(\"my-database\")\n .mysqlTables(StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableArgs.builder()\n .table(\"excludedTable\")\n .mysqlColumns(StreamBackfillAllMysqlExcludedObjectsMysqlDatabaseMysqlTableMysqlColumnArgs.builder()\n .column(\"excludedColumn\")\n .dataType(\"VARCHAR\")\n .collation(\"utf8mb4\")\n .primaryKey(false)\n .nullable(false)\n .ordinalPosition(0)\n .build())\n .build())\n .build())\n .build())\n .build())\n .customerManagedEncryptionKey(\"kms-name\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(keyUser)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-instance\n databaseVersion: MYSQL_8_0\n region: us-central1\n settings:\n tier: db-f1-micro\n backupConfiguration:\n enabled: true\n binaryLogEnabled: true\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n deletionProtection: true\n db:\n type: gcp:sql:Database\n properties:\n instance: ${instance.name}\n name: db\n pwd:\n type: random:RandomPassword\n properties:\n length: 16\n special: false\n user:\n type: gcp:sql:User\n properties:\n name: user\n instance: ${instance.name}\n host: '%'\n password: ${pwd.result}\n sourceConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: source_connection_profile\n properties:\n displayName: Source connection profile\n location: us-central1\n connectionProfileId: source-profile\n mysqlProfile:\n hostname: ${instance.publicIpAddress}\n username: ${user.name}\n password: ${user.password}\n bucket:\n type: gcp:storage:Bucket\n properties:\n name: my-bucket\n location: US\n uniformBucketLevelAccess: true\n viewer:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${bucket.name}\n role: roles/storage.objectViewer\n member: serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com\n creator:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${bucket.name}\n role: roles/storage.objectCreator\n member: serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com\n reader:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${bucket.name}\n role: roles/storage.legacyBucketReader\n member: serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com\n keyUser:\n type: gcp:kms:CryptoKeyIAMMember\n name: key_user\n properties:\n cryptoKeyId: kms-name\n role: roles/cloudkms.cryptoKeyEncrypterDecrypter\n member: serviceAccount:service-${project.number}@gcp-sa-datastream.iam.gserviceaccount.com\n destinationConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: destination_connection_profile\n properties:\n displayName: Connection profile\n location: us-central1\n connectionProfileId: destination-profile\n gcsProfile:\n bucket: ${bucket.name}\n rootPath: /path\n default:\n type: gcp:datastream:Stream\n properties:\n streamId: my-stream\n desiredState: NOT_STARTED\n location: us-central1\n displayName: my stream\n labels:\n key: value\n sourceConfig:\n sourceConnectionProfile: ${sourceConnectionProfile.id}\n mysqlSourceConfig:\n includeObjects:\n mysqlDatabases:\n - database: my-database\n mysqlTables:\n - table: includedTable\n mysqlColumns:\n - column: includedColumn\n dataType: VARCHAR\n collation: utf8mb4\n primaryKey: false\n nullable: false\n ordinalPosition: 0\n - table: 
includedTable_2\n excludeObjects:\n mysqlDatabases:\n - database: my-database\n mysqlTables:\n - table: excludedTable\n mysqlColumns:\n - column: excludedColumn\n dataType: VARCHAR\n collation: utf8mb4\n primaryKey: false\n nullable: false\n ordinalPosition: 0\n maxConcurrentCdcTasks: 5\n destinationConfig:\n destinationConnectionProfile: ${destinationConnectionProfile.id}\n gcsDestinationConfig:\n path: mydata\n fileRotationMb: 200\n fileRotationInterval: 60s\n jsonFileFormat:\n schemaFileFormat: NO_SCHEMA_FILE\n compression: GZIP\n backfillAll:\n mysqlExcludedObjects:\n mysqlDatabases:\n - database: my-database\n mysqlTables:\n - table: excludedTable\n mysqlColumns:\n - column: excludedColumn\n dataType: VARCHAR\n collation: utf8mb4\n primaryKey: false\n nullable: false\n ordinalPosition: 0\n customerManagedEncryptionKey: kms-name\n options:\n dependson:\n - ${keyUser}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Postgresql\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst source = new gcp.datastream.ConnectionProfile(\"source\", {\n displayName: \"Postgresql Source\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n postgresqlProfile: {\n hostname: \"hostname\",\n port: 3306,\n username: \"user\",\n password: \"pass\",\n database: \"postgres\",\n },\n});\nconst destination = new gcp.datastream.ConnectionProfile(\"destination\", {\n displayName: \"BigQuery Destination\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n displayName: \"Postgres to BigQuery\",\n location: \"us-central1\",\n streamId: \"my-stream\",\n desiredState: \"RUNNING\",\n sourceConfig: {\n sourceConnectionProfile: source.id,\n postgresqlSourceConfig: {\n maxConcurrentBackfillTasks: 12,\n publication: \"publication\",\n replicationSlot: \"replication_slot\",\n includeObjects: {\n postgresqlSchemas: [{\n schema: \"schema\",\n postgresqlTables: [{\n table: \"table\",\n postgresqlColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n excludeObjects: {\n postgresqlSchemas: [{\n schema: \"schema\",\n postgresqlTables: [{\n table: \"table\",\n postgresqlColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n },\n },\n destinationConfig: {\n destinationConnectionProfile: destination.id,\n bigqueryDestinationConfig: {\n dataFreshness: \"900s\",\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n },\n },\n },\n },\n backfillAll: {\n postgresqlExcludedObjects: {\n postgresqlSchemas: [{\n schema: \"schema\",\n postgresqlTables: [{\n table: \"table\",\n postgresqlColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nsource = gcp.datastream.ConnectionProfile(\"source\",\n display_name=\"Postgresql Source\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n postgresql_profile={\n \"hostname\": \"hostname\",\n \"port\": 3306,\n \"username\": \"user\",\n \"password\": \"pass\",\n \"database\": \"postgres\",\n })\ndestination = gcp.datastream.ConnectionProfile(\"destination\",\n display_name=\"BigQuery Destination\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\ndefault = 
gcp.datastream.Stream(\"default\",\n display_name=\"Postgres to BigQuery\",\n location=\"us-central1\",\n stream_id=\"my-stream\",\n desired_state=\"RUNNING\",\n source_config={\n \"source_connection_profile\": source.id,\n \"postgresql_source_config\": {\n \"max_concurrent_backfill_tasks\": 12,\n \"publication\": \"publication\",\n \"replication_slot\": \"replication_slot\",\n \"include_objects\": {\n \"postgresql_schemas\": [{\n \"schema\": \"schema\",\n \"postgresql_tables\": [{\n \"table\": \"table\",\n \"postgresql_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n \"exclude_objects\": {\n \"postgresql_schemas\": [{\n \"schema\": \"schema\",\n \"postgresql_tables\": [{\n \"table\": \"table\",\n \"postgresql_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n },\n },\n destination_config={\n \"destination_connection_profile\": destination.id,\n \"bigquery_destination_config\": {\n \"data_freshness\": \"900s\",\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n },\n },\n },\n },\n backfill_all={\n \"postgresql_excluded_objects\": {\n \"postgresql_schemas\": [{\n \"schema\": \"schema\",\n \"postgresql_tables\": [{\n \"table\": \"table\",\n \"postgresql_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var source = new Gcp.Datastream.ConnectionProfile(\"source\", new()\n {\n DisplayName = \"Postgresql Source\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n PostgresqlProfile = new Gcp.Datastream.Inputs.ConnectionProfilePostgresqlProfileArgs\n {\n Hostname = \"hostname\",\n Port = 3306,\n Username = \"user\",\n Password = \"pass\",\n Database = \"postgres\",\n },\n });\n\n var destination = new Gcp.Datastream.ConnectionProfile(\"destination\", new()\n {\n DisplayName = \"BigQuery Destination\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n DisplayName = \"Postgres to BigQuery\",\n Location = \"us-central1\",\n StreamId = \"my-stream\",\n DesiredState = \"RUNNING\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = source.Id,\n PostgresqlSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigArgs\n {\n MaxConcurrentBackfillTasks = 12,\n Publication = \"publication\",\n ReplicationSlot = \"replication_slot\",\n IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsArgs\n {\n PostgresqlSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaArgs\n {\n Schema = \"schema\",\n PostgresqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTableArgs\n {\n Table = \"table\",\n PostgresqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n ExcludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsArgs\n {\n PostgresqlSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaArgs\n 
{\n Schema = \"schema\",\n PostgresqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTableArgs\n {\n Table = \"table\",\n PostgresqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n },\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destination.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n DataFreshness = \"900s\",\n SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n },\n },\n },\n },\n BackfillAll = new Gcp.Datastream.Inputs.StreamBackfillAllArgs\n {\n PostgresqlExcludedObjects = new Gcp.Datastream.Inputs.StreamBackfillAllPostgresqlExcludedObjectsArgs\n {\n PostgresqlSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaArgs\n {\n Schema = \"schema\",\n PostgresqlTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTableArgs\n {\n Table = \"table\",\n PostgresqlColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsource, err := datastream.NewConnectionProfile(ctx, \"source\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Postgresql Source\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tPostgresqlProfile: \u0026datastream.ConnectionProfilePostgresqlProfileArgs{\n\t\t\t\tHostname: pulumi.String(\"hostname\"),\n\t\t\t\tPort: pulumi.Int(3306),\n\t\t\t\tUsername: pulumi.String(\"user\"),\n\t\t\t\tPassword: pulumi.String(\"pass\"),\n\t\t\t\tDatabase: pulumi.String(\"postgres\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestination, err := datastream.NewConnectionProfile(ctx, \"destination\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"BigQuery Destination\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tDisplayName: pulumi.String(\"Postgres to BigQuery\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tDesiredState: pulumi.String(\"RUNNING\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: source.ID(),\n\t\t\t\tPostgresqlSourceConfig: \u0026datastream.StreamSourceConfigPostgresqlSourceConfigArgs{\n\t\t\t\t\tMaxConcurrentBackfillTasks: 
pulumi.Int(12),\n\t\t\t\t\tPublication: pulumi.String(\"publication\"),\n\t\t\t\t\tReplicationSlot: pulumi.String(\"replication_slot\"),\n\t\t\t\t\tIncludeObjects: \u0026datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsArgs{\n\t\t\t\t\t\tPostgresqlSchemas: datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tPostgresqlTables: datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t\tPostgresqlColumns: datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tExcludeObjects: \u0026datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsArgs{\n\t\t\t\t\t\tPostgresqlSchemas: datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tPostgresqlTables: datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t\tPostgresqlColumns: datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destination.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tDataFreshness: pulumi.String(\"900s\"),\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillAll: \u0026datastream.StreamBackfillAllArgs{\n\t\t\t\tPostgresqlExcludedObjects: \u0026datastream.StreamBackfillAllPostgresqlExcludedObjectsArgs{\n\t\t\t\t\tPostgresqlSchemas: 
datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaArray{\n\t\t\t\t\t\t\u0026datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaArgs{\n\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\tPostgresqlTables: datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTableArray{\n\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTableArgs{\n\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\tPostgresqlColumns: datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArray{\n\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfilePostgresqlProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigPostgresqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigPostgresqlSourceConfigIncludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigPostgresqlSourceConfigExcludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllPostgresqlExcludedObjectsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var source = new ConnectionProfile(\"source\", ConnectionProfileArgs.builder()\n .displayName(\"Postgresql Source\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .postgresqlProfile(ConnectionProfilePostgresqlProfileArgs.builder()\n .hostname(\"hostname\")\n .port(3306)\n .username(\"user\")\n .password(\"pass\")\n .database(\"postgres\")\n .build())\n .build());\n\n var destination = new ConnectionProfile(\"destination\", ConnectionProfileArgs.builder()\n .displayName(\"BigQuery Destination\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .displayName(\"Postgres to BigQuery\")\n .location(\"us-central1\")\n 
.streamId(\"my-stream\")\n .desiredState(\"RUNNING\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(source.id())\n .postgresqlSourceConfig(StreamSourceConfigPostgresqlSourceConfigArgs.builder()\n .maxConcurrentBackfillTasks(12)\n .publication(\"publication\")\n .replicationSlot(\"replication_slot\")\n .includeObjects(StreamSourceConfigPostgresqlSourceConfigIncludeObjectsArgs.builder()\n .postgresqlSchemas(StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaArgs.builder()\n .schema(\"schema\")\n .postgresqlTables(StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTableArgs.builder()\n .table(\"table\")\n .postgresqlColumns(StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .excludeObjects(StreamSourceConfigPostgresqlSourceConfigExcludeObjectsArgs.builder()\n .postgresqlSchemas(StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaArgs.builder()\n .schema(\"schema\")\n .postgresqlTables(StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTableArgs.builder()\n .table(\"table\")\n .postgresqlColumns(StreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .build())\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destination.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .dataFreshness(\"900s\")\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .build())\n .build())\n .build())\n .build())\n .backfillAll(StreamBackfillAllArgs.builder()\n .postgresqlExcludedObjects(StreamBackfillAllPostgresqlExcludedObjectsArgs.builder()\n .postgresqlSchemas(StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaArgs.builder()\n .schema(\"schema\")\n .postgresqlTables(StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTableArgs.builder()\n .table(\"table\")\n .postgresqlColumns(StreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n source:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: Postgresql Source\n location: us-central1\n connectionProfileId: source-profile\n postgresqlProfile:\n hostname: hostname\n port: 3306\n username: user\n password: pass\n database: postgres\n destination:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: BigQuery Destination\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n default:\n type: gcp:datastream:Stream\n properties:\n displayName: Postgres to BigQuery\n location: us-central1\n streamId: my-stream\n desiredState: RUNNING\n sourceConfig:\n sourceConnectionProfile: ${source.id}\n postgresqlSourceConfig:\n maxConcurrentBackfillTasks: 12\n publication: publication\n replicationSlot: replication_slot\n includeObjects:\n postgresqlSchemas:\n - schema: schema\n postgresqlTables:\n - table: table\n 
postgresqlColumns:\n - column: column\n excludeObjects:\n postgresqlSchemas:\n - schema: schema\n postgresqlTables:\n - table: table\n postgresqlColumns:\n - column: column\n destinationConfig:\n destinationConnectionProfile: ${destination.id}\n bigqueryDestinationConfig:\n dataFreshness: 900s\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n backfillAll:\n postgresqlExcludedObjects:\n postgresqlSchemas:\n - schema: schema\n postgresqlTables:\n - table: table\n postgresqlColumns:\n - column: column\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Oracle\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst source = new gcp.datastream.ConnectionProfile(\"source\", {\n displayName: \"Oracle Source\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n oracleProfile: {\n hostname: \"hostname\",\n port: 1521,\n username: \"user\",\n password: \"pass\",\n databaseService: \"ORCL\",\n },\n});\nconst destination = new gcp.datastream.ConnectionProfile(\"destination\", {\n displayName: \"BigQuery Destination\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst stream5 = new gcp.datastream.Stream(\"stream5\", {\n displayName: \"Oracle to BigQuery\",\n location: \"us-central1\",\n streamId: \"my-stream\",\n desiredState: \"RUNNING\",\n sourceConfig: {\n sourceConnectionProfile: source.id,\n oracleSourceConfig: {\n maxConcurrentCdcTasks: 8,\n maxConcurrentBackfillTasks: 12,\n includeObjects: {\n oracleSchemas: [{\n schema: \"schema\",\n oracleTables: [{\n table: \"table\",\n oracleColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n excludeObjects: {\n oracleSchemas: [{\n schema: \"schema\",\n oracleTables: [{\n table: \"table\",\n oracleColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n dropLargeObjects: {},\n },\n },\n destinationConfig: {\n destinationConnectionProfile: destination.id,\n bigqueryDestinationConfig: {\n dataFreshness: \"900s\",\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n },\n },\n },\n },\n backfillAll: {\n oracleExcludedObjects: {\n oracleSchemas: [{\n schema: \"schema\",\n oracleTables: [{\n table: \"table\",\n oracleColumns: [{\n column: \"column\",\n }],\n }],\n }],\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nsource = gcp.datastream.ConnectionProfile(\"source\",\n display_name=\"Oracle Source\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n oracle_profile={\n \"hostname\": \"hostname\",\n \"port\": 1521,\n \"username\": \"user\",\n \"password\": \"pass\",\n \"database_service\": \"ORCL\",\n })\ndestination = gcp.datastream.ConnectionProfile(\"destination\",\n display_name=\"BigQuery Destination\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\nstream5 = gcp.datastream.Stream(\"stream5\",\n display_name=\"Oracle to BigQuery\",\n location=\"us-central1\",\n stream_id=\"my-stream\",\n desired_state=\"RUNNING\",\n source_config={\n \"source_connection_profile\": source.id,\n \"oracle_source_config\": {\n \"max_concurrent_cdc_tasks\": 8,\n \"max_concurrent_backfill_tasks\": 12,\n \"include_objects\": {\n \"oracle_schemas\": [{\n \"schema\": \"schema\",\n \"oracle_tables\": [{\n \"table\": \"table\",\n \"oracle_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n \"exclude_objects\": {\n 
\"oracle_schemas\": [{\n \"schema\": \"schema\",\n \"oracle_tables\": [{\n \"table\": \"table\",\n \"oracle_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n \"drop_large_objects\": {},\n },\n },\n destination_config={\n \"destination_connection_profile\": destination.id,\n \"bigquery_destination_config\": {\n \"data_freshness\": \"900s\",\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n },\n },\n },\n },\n backfill_all={\n \"oracle_excluded_objects\": {\n \"oracle_schemas\": [{\n \"schema\": \"schema\",\n \"oracle_tables\": [{\n \"table\": \"table\",\n \"oracle_columns\": [{\n \"column\": \"column\",\n }],\n }],\n }],\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var source = new Gcp.Datastream.ConnectionProfile(\"source\", new()\n {\n DisplayName = \"Oracle Source\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n OracleProfile = new Gcp.Datastream.Inputs.ConnectionProfileOracleProfileArgs\n {\n Hostname = \"hostname\",\n Port = 1521,\n Username = \"user\",\n Password = \"pass\",\n DatabaseService = \"ORCL\",\n },\n });\n\n var destination = new Gcp.Datastream.ConnectionProfile(\"destination\", new()\n {\n DisplayName = \"BigQuery Destination\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var stream5 = new Gcp.Datastream.Stream(\"stream5\", new()\n {\n DisplayName = \"Oracle to BigQuery\",\n Location = \"us-central1\",\n StreamId = \"my-stream\",\n DesiredState = \"RUNNING\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = source.Id,\n OracleSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigArgs\n {\n MaxConcurrentCdcTasks = 8,\n MaxConcurrentBackfillTasks = 12,\n IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsArgs\n {\n OracleSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaArgs\n {\n Schema = \"schema\",\n OracleTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableArgs\n {\n Table = \"table\",\n OracleColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n ExcludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsArgs\n {\n OracleSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaArgs\n {\n Schema = \"schema\",\n OracleTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableArgs\n {\n Table = \"table\",\n OracleColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n DropLargeObjects = null,\n },\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destination.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n DataFreshness = \"900s\",\n SourceHierarchyDatasets = new 
Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n },\n },\n },\n },\n BackfillAll = new Gcp.Datastream.Inputs.StreamBackfillAllArgs\n {\n OracleExcludedObjects = new Gcp.Datastream.Inputs.StreamBackfillAllOracleExcludedObjectsArgs\n {\n OracleSchemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllOracleExcludedObjectsOracleSchemaArgs\n {\n Schema = \"schema\",\n OracleTables = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableArgs\n {\n Table = \"table\",\n OracleColumns = new[]\n {\n new Gcp.Datastream.Inputs.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumnArgs\n {\n Column = \"column\",\n },\n },\n },\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tsource, err := datastream.NewConnectionProfile(ctx, \"source\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Oracle Source\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tOracleProfile: \u0026datastream.ConnectionProfileOracleProfileArgs{\n\t\t\t\tHostname: pulumi.String(\"hostname\"),\n\t\t\t\tPort: pulumi.Int(1521),\n\t\t\t\tUsername: pulumi.String(\"user\"),\n\t\t\t\tPassword: pulumi.String(\"pass\"),\n\t\t\t\tDatabaseService: pulumi.String(\"ORCL\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestination, err := datastream.NewConnectionProfile(ctx, \"destination\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"BigQuery Destination\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"stream5\", \u0026datastream.StreamArgs{\n\t\t\tDisplayName: pulumi.String(\"Oracle to BigQuery\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tDesiredState: pulumi.String(\"RUNNING\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: source.ID(),\n\t\t\t\tOracleSourceConfig: \u0026datastream.StreamSourceConfigOracleSourceConfigArgs{\n\t\t\t\t\tMaxConcurrentCdcTasks: pulumi.Int(8),\n\t\t\t\t\tMaxConcurrentBackfillTasks: pulumi.Int(12),\n\t\t\t\t\tIncludeObjects: \u0026datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsArgs{\n\t\t\t\t\t\tOracleSchemas: datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tOracleTables: datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t\tOracleColumns: 
datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tExcludeObjects: \u0026datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsArgs{\n\t\t\t\t\t\tOracleSchemas: datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tOracleTables: datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t\tOracleColumns: datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumnArray{\n\t\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tDropLargeObjects: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destination.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tDataFreshness: pulumi.String(\"900s\"),\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillAll: \u0026datastream.StreamBackfillAllArgs{\n\t\t\t\tOracleExcludedObjects: \u0026datastream.StreamBackfillAllOracleExcludedObjectsArgs{\n\t\t\t\t\tOracleSchemas: datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaArray{\n\t\t\t\t\t\t\u0026datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaArgs{\n\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\tOracleTables: datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableArray{\n\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableArgs{\n\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\tOracleColumns: datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumnArray{\n\t\t\t\t\t\t\t\t\t\t\u0026datastream.StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumnArgs{\n\t\t\t\t\t\t\t\t\t\t\tColumn: pulumi.String(\"column\"),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport 
com.pulumi.core.Output;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileOracleProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigOracleSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigOracleSourceConfigIncludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigOracleSourceConfigExcludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigOracleSourceConfigDropLargeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllOracleExcludedObjectsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var source = new ConnectionProfile(\"source\", ConnectionProfileArgs.builder()\n .displayName(\"Oracle Source\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .oracleProfile(ConnectionProfileOracleProfileArgs.builder()\n .hostname(\"hostname\")\n .port(1521)\n .username(\"user\")\n .password(\"pass\")\n .databaseService(\"ORCL\")\n .build())\n .build());\n\n var destination = new ConnectionProfile(\"destination\", ConnectionProfileArgs.builder()\n .displayName(\"BigQuery Destination\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var stream5 = new Stream(\"stream5\", StreamArgs.builder()\n .displayName(\"Oracle to BigQuery\")\n .location(\"us-central1\")\n .streamId(\"my-stream\")\n .desiredState(\"RUNNING\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(source.id())\n .oracleSourceConfig(StreamSourceConfigOracleSourceConfigArgs.builder()\n .maxConcurrentCdcTasks(8)\n .maxConcurrentBackfillTasks(12)\n .includeObjects(StreamSourceConfigOracleSourceConfigIncludeObjectsArgs.builder()\n .oracleSchemas(StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaArgs.builder()\n .schema(\"schema\")\n .oracleTables(StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableArgs.builder()\n .table(\"table\")\n .oracleColumns(StreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemaOracleTableOracleColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .excludeObjects(StreamSourceConfigOracleSourceConfigExcludeObjectsArgs.builder()\n .oracleSchemas(StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaArgs.builder()\n .schema(\"schema\")\n .oracleTables(StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableArgs.builder()\n 
.table(\"table\")\n .oracleColumns(StreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemaOracleTableOracleColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .dropLargeObjects()\n .build())\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destination.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .dataFreshness(\"900s\")\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .build())\n .build())\n .build())\n .build())\n .backfillAll(StreamBackfillAllArgs.builder()\n .oracleExcludedObjects(StreamBackfillAllOracleExcludedObjectsArgs.builder()\n .oracleSchemas(StreamBackfillAllOracleExcludedObjectsOracleSchemaArgs.builder()\n .schema(\"schema\")\n .oracleTables(StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableArgs.builder()\n .table(\"table\")\n .oracleColumns(StreamBackfillAllOracleExcludedObjectsOracleSchemaOracleTableOracleColumnArgs.builder()\n .column(\"column\")\n .build())\n .build())\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n source:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: Oracle Source\n location: us-central1\n connectionProfileId: source-profile\n oracleProfile:\n hostname: hostname\n port: 1521\n username: user\n password: pass\n databaseService: ORCL\n destination:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: BigQuery Destination\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n stream5:\n type: gcp:datastream:Stream\n properties:\n displayName: Oracle to BigQuery\n location: us-central1\n streamId: my-stream\n desiredState: RUNNING\n sourceConfig:\n sourceConnectionProfile: ${source.id}\n oracleSourceConfig:\n maxConcurrentCdcTasks: 8\n maxConcurrentBackfillTasks: 12\n includeObjects:\n oracleSchemas:\n - schema: schema\n oracleTables:\n - table: table\n oracleColumns:\n - column: column\n excludeObjects:\n oracleSchemas:\n - schema: schema\n oracleTables:\n - table: table\n oracleColumns:\n - column: column\n dropLargeObjects: {}\n destinationConfig:\n destinationConnectionProfile: ${destination.id}\n bigqueryDestinationConfig:\n dataFreshness: 900s\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n backfillAll:\n oracleExcludedObjects:\n oracleSchemas:\n - schema: schema\n oracleTables:\n - table: table\n oracleColumns:\n - column: column\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Sql Server\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"sql-server\",\n databaseVersion: \"SQLSERVER_2019_STANDARD\",\n region: \"us-central1\",\n rootPassword: \"root-password\",\n deletionProtection: true,\n settings: {\n tier: \"db-custom-2-4096\",\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"user\",\n instance: 
instance.name,\n password: \"password\",\n});\nconst db = new gcp.sql.Database(\"db\", {\n name: \"db\",\n instance: instance.name,\n}, {\n dependsOn: [user],\n});\nconst source = new gcp.datastream.ConnectionProfile(\"source\", {\n displayName: \"SQL Server Source\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n sqlServerProfile: {\n hostname: instance.publicIpAddress,\n port: 1433,\n username: user.name,\n password: user.password,\n database: db.name,\n },\n});\nconst destination = new gcp.datastream.ConnectionProfile(\"destination\", {\n displayName: \"BigQuery Destination\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n displayName: \"SQL Server to BigQuery\",\n location: \"us-central1\",\n streamId: \"stream\",\n sourceConfig: {\n sourceConnectionProfile: source.id,\n sqlServerSourceConfig: {\n includeObjects: {\n schemas: [{\n schema: \"schema\",\n tables: [{\n table: \"table\",\n }],\n }],\n },\n transactionLogs: {},\n },\n },\n destinationConfig: {\n destinationConnectionProfile: destination.id,\n bigqueryDestinationConfig: {\n dataFreshness: \"900s\",\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n },\n },\n },\n },\n backfillNone: {},\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"sql-server\",\n database_version=\"SQLSERVER_2019_STANDARD\",\n region=\"us-central1\",\n root_password=\"root-password\",\n deletion_protection=True,\n settings={\n \"tier\": \"db-custom-2-4096\",\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n })\nuser = gcp.sql.User(\"user\",\n name=\"user\",\n instance=instance.name,\n password=\"password\")\ndb = gcp.sql.Database(\"db\",\n name=\"db\",\n instance=instance.name,\n opts = pulumi.ResourceOptions(depends_on=[user]))\nsource = gcp.datastream.ConnectionProfile(\"source\",\n display_name=\"SQL Server Source\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n sql_server_profile={\n \"hostname\": instance.public_ip_address,\n \"port\": 1433,\n \"username\": user.name,\n \"password\": user.password,\n \"database\": db.name,\n })\ndestination = gcp.datastream.ConnectionProfile(\"destination\",\n display_name=\"BigQuery Destination\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\ndefault = gcp.datastream.Stream(\"default\",\n display_name=\"SQL Server to BigQuery\",\n location=\"us-central1\",\n stream_id=\"stream\",\n source_config={\n \"source_connection_profile\": source.id,\n \"sql_server_source_config\": {\n \"include_objects\": {\n \"schemas\": [{\n \"schema\": \"schema\",\n \"tables\": [{\n \"table\": \"table\",\n }],\n }],\n },\n \"transaction_logs\": {},\n },\n },\n destination_config={\n \"destination_connection_profile\": destination.id,\n \"bigquery_destination_config\": {\n \"data_freshness\": \"900s\",\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n },\n },\n },\n },\n backfill_none={})\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var 
instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"sql-server\",\n DatabaseVersion = \"SQLSERVER_2019_STANDARD\",\n Region = \"us-central1\",\n RootPassword = \"root-password\",\n DeletionProtection = true,\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-custom-2-4096\",\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"user\",\n Instance = instance.Name,\n Password = \"password\",\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Name = \"db\",\n Instance = instance.Name,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n user,\n },\n });\n\n var source = new Gcp.Datastream.ConnectionProfile(\"source\", new()\n {\n DisplayName = \"SQL Server Source\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n SqlServerProfile = new Gcp.Datastream.Inputs.ConnectionProfileSqlServerProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Port = 1433,\n Username = user.Name,\n Password = user.Password,\n Database = db.Name,\n },\n });\n\n var destination = new Gcp.Datastream.ConnectionProfile(\"destination\", new()\n {\n DisplayName = \"BigQuery Destination\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n DisplayName = \"SQL Server to BigQuery\",\n Location = \"us-central1\",\n StreamId = \"stream\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = source.Id,\n SqlServerSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigArgs\n {\n IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs\n {\n Schemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs\n {\n Schema = \"schema\",\n Tables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs\n {\n Table = \"table\",\n },\n },\n },\n },\n },\n TransactionLogs = null,\n },\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destination.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n DataFreshness = \"900s\",\n SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n },\n },\n },\n },\n BackfillNone = null,\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"sql-server\"),\n\t\t\tDatabaseVersion: pulumi.String(\"SQLSERVER_2019_STANDARD\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tRootPassword: pulumi.String(\"root-password\"),\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-custom-2-4096\"),\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tPassword: pulumi.String(\"password\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdb, err := sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tName: pulumi.String(\"db\"),\n\t\t\tInstance: instance.Name,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tuser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsource, err := datastream.NewConnectionProfile(ctx, \"source\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"SQL Server Source\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tSqlServerProfile: \u0026datastream.ConnectionProfileSqlServerProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tPort: pulumi.Int(1433),\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t\tDatabase: db.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestination, err := datastream.NewConnectionProfile(ctx, \"destination\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"BigQuery Destination\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tDisplayName: pulumi.String(\"SQL Server to BigQuery\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tStreamId: pulumi.String(\"stream\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: 
source.ID(),\n\t\t\t\tSqlServerSourceConfig: \u0026datastream.StreamSourceConfigSqlServerSourceConfigArgs{\n\t\t\t\t\tIncludeObjects: \u0026datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs{\n\t\t\t\t\t\tSchemas: datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tTables: datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tTransactionLogs: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destination.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tDataFreshness: pulumi.String(\"900s\"),\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillNone: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileSqlServerProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillNoneArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport 
java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"sql-server\")\n .databaseVersion(\"SQLSERVER_2019_STANDARD\")\n .region(\"us-central1\")\n .rootPassword(\"root-password\")\n .deletionProtection(\"true\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-custom-2-4096\")\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"user\")\n .instance(instance.name())\n .password(\"password\")\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .name(\"db\")\n .instance(instance.name())\n .build(), CustomResourceOptions.builder()\n .dependsOn(user)\n .build());\n\n var source = new ConnectionProfile(\"source\", ConnectionProfileArgs.builder()\n .displayName(\"SQL Server Source\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .sqlServerProfile(ConnectionProfileSqlServerProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .port(1433)\n .username(user.name())\n .password(user.password())\n .database(db.name())\n .build())\n .build());\n\n var destination = new ConnectionProfile(\"destination\", ConnectionProfileArgs.builder()\n .displayName(\"BigQuery Destination\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .displayName(\"SQL Server to BigQuery\")\n .location(\"us-central1\")\n .streamId(\"stream\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(source.id())\n .sqlServerSourceConfig(StreamSourceConfigSqlServerSourceConfigArgs.builder()\n .includeObjects(StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs.builder()\n .schemas(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs.builder()\n .schema(\"schema\")\n .tables(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs.builder()\n .table(\"table\")\n .build())\n .build())\n .build())\n .transactionLogs()\n .build())\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destination.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .dataFreshness(\"900s\")\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .build())\n .build())\n .build())\n .build())\n .backfillNone()\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: 
sql-server\n databaseVersion: SQLSERVER_2019_STANDARD\n region: us-central1\n rootPassword: root-password\n deletionProtection: 'true'\n settings:\n tier: db-custom-2-4096\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n db:\n type: gcp:sql:Database\n properties:\n name: db\n instance: ${instance.name}\n options:\n dependson:\n - ${user}\n user:\n type: gcp:sql:User\n properties:\n name: user\n instance: ${instance.name}\n password: password\n source:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: SQL Server Source\n location: us-central1\n connectionProfileId: source-profile\n sqlServerProfile:\n hostname: ${instance.publicIpAddress}\n port: 1433\n username: ${user.name}\n password: ${user.password}\n database: ${db.name}\n destination:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: BigQuery Destination\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n default:\n type: gcp:datastream:Stream\n properties:\n displayName: SQL Server to BigQuery\n location: us-central1\n streamId: stream\n sourceConfig:\n sourceConnectionProfile: ${source.id}\n sqlServerSourceConfig:\n includeObjects:\n schemas:\n - schema: schema\n tables:\n - table: table\n transactionLogs: {}\n destinationConfig:\n destinationConnectionProfile: ${destination.id}\n bigqueryDestinationConfig:\n dataFreshness: 900s\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n backfillNone: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Sql Server Change Tables\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"sql-server\",\n databaseVersion: \"SQLSERVER_2019_STANDARD\",\n region: \"us-central1\",\n rootPassword: \"root-password\",\n deletionProtection: true,\n settings: {\n tier: \"db-custom-2-4096\",\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"user\",\n instance: instance.name,\n password: \"password\",\n});\nconst db = new gcp.sql.Database(\"db\", {\n name: \"db\",\n instance: instance.name,\n}, {\n dependsOn: [user],\n});\nconst source = new gcp.datastream.ConnectionProfile(\"source\", {\n displayName: \"SQL Server Source\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n sqlServerProfile: {\n hostname: instance.publicIpAddress,\n port: 1433,\n username: user.name,\n password: user.password,\n database: db.name,\n },\n});\nconst destination = new gcp.datastream.ConnectionProfile(\"destination\", {\n displayName: \"BigQuery Destination\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n displayName: \"SQL Server to BigQuery\",\n location: \"us-central1\",\n streamId: \"stream\",\n sourceConfig: {\n sourceConnectionProfile: source.id,\n sqlServerSourceConfig: {\n includeObjects: {\n schemas: [{\n schema: \"schema\",\n tables: [{\n table: \"table\",\n }],\n }],\n },\n changeTables: {},\n },\n },\n destinationConfig: {\n 
destinationConnectionProfile: destination.id,\n bigqueryDestinationConfig: {\n dataFreshness: \"900s\",\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n },\n },\n },\n },\n backfillNone: {},\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"sql-server\",\n database_version=\"SQLSERVER_2019_STANDARD\",\n region=\"us-central1\",\n root_password=\"root-password\",\n deletion_protection=True,\n settings={\n \"tier\": \"db-custom-2-4096\",\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n })\nuser = gcp.sql.User(\"user\",\n name=\"user\",\n instance=instance.name,\n password=\"password\")\ndb = gcp.sql.Database(\"db\",\n name=\"db\",\n instance=instance.name,\n opts = pulumi.ResourceOptions(depends_on=[user]))\nsource = gcp.datastream.ConnectionProfile(\"source\",\n display_name=\"SQL Server Source\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n sql_server_profile={\n \"hostname\": instance.public_ip_address,\n \"port\": 1433,\n \"username\": user.name,\n \"password\": user.password,\n \"database\": db.name,\n })\ndestination = gcp.datastream.ConnectionProfile(\"destination\",\n display_name=\"BigQuery Destination\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\ndefault = gcp.datastream.Stream(\"default\",\n display_name=\"SQL Server to BigQuery\",\n location=\"us-central1\",\n stream_id=\"stream\",\n source_config={\n \"source_connection_profile\": source.id,\n \"sql_server_source_config\": {\n \"include_objects\": {\n \"schemas\": [{\n \"schema\": \"schema\",\n \"tables\": [{\n \"table\": \"table\",\n }],\n }],\n },\n \"change_tables\": {},\n },\n },\n destination_config={\n \"destination_connection_profile\": destination.id,\n \"bigquery_destination_config\": {\n \"data_freshness\": \"900s\",\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n },\n },\n },\n },\n backfill_none={})\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"sql-server\",\n DatabaseVersion = \"SQLSERVER_2019_STANDARD\",\n Region = \"us-central1\",\n RootPassword = \"root-password\",\n DeletionProtection = true,\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-custom-2-4096\",\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = 
\"user\",\n Instance = instance.Name,\n Password = \"password\",\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Name = \"db\",\n Instance = instance.Name,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n user,\n },\n });\n\n var source = new Gcp.Datastream.ConnectionProfile(\"source\", new()\n {\n DisplayName = \"SQL Server Source\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n SqlServerProfile = new Gcp.Datastream.Inputs.ConnectionProfileSqlServerProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Port = 1433,\n Username = user.Name,\n Password = user.Password,\n Database = db.Name,\n },\n });\n\n var destination = new Gcp.Datastream.ConnectionProfile(\"destination\", new()\n {\n DisplayName = \"BigQuery Destination\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n DisplayName = \"SQL Server to BigQuery\",\n Location = \"us-central1\",\n StreamId = \"stream\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = source.Id,\n SqlServerSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigArgs\n {\n IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs\n {\n Schemas = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs\n {\n Schema = \"schema\",\n Tables = new[]\n {\n new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs\n {\n Table = \"table\",\n },\n },\n },\n },\n },\n ChangeTables = null,\n },\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destination.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n DataFreshness = \"900s\",\n SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n },\n },\n },\n },\n BackfillNone = null,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"sql-server\"),\n\t\t\tDatabaseVersion: pulumi.String(\"SQLSERVER_2019_STANDARD\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tRootPassword: pulumi.String(\"root-password\"),\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-custom-2-4096\"),\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: 
pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tPassword: pulumi.String(\"password\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdb, err := sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tName: pulumi.String(\"db\"),\n\t\t\tInstance: instance.Name,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tuser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsource, err := datastream.NewConnectionProfile(ctx, \"source\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"SQL Server Source\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tSqlServerProfile: \u0026datastream.ConnectionProfileSqlServerProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tPort: pulumi.Int(1433),\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t\tDatabase: db.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestination, err := datastream.NewConnectionProfile(ctx, \"destination\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"BigQuery Destination\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tDisplayName: pulumi.String(\"SQL Server to BigQuery\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tStreamId: pulumi.String(\"stream\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: source.ID(),\n\t\t\t\tSqlServerSourceConfig: \u0026datastream.StreamSourceConfigSqlServerSourceConfigArgs{\n\t\t\t\t\tIncludeObjects: \u0026datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs{\n\t\t\t\t\t\tSchemas: datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArray{\n\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs{\n\t\t\t\t\t\t\t\tSchema: pulumi.String(\"schema\"),\n\t\t\t\t\t\t\t\tTables: datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArray{\n\t\t\t\t\t\t\t\t\t\u0026datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs{\n\t\t\t\t\t\t\t\t\t\tTable: pulumi.String(\"table\"),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tChangeTables: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destination.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tDataFreshness: 
pulumi.String(\"900s\"),\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillNone: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileSqlServerProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigChangeTablesArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillNoneArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"sql-server\")\n .databaseVersion(\"SQLSERVER_2019_STANDARD\")\n .region(\"us-central1\")\n .rootPassword(\"root-password\")\n .deletionProtection(\"true\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-custom-2-4096\")\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n 
.build())\n .build())\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"user\")\n .instance(instance.name())\n .password(\"password\")\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .name(\"db\")\n .instance(instance.name())\n .build(), CustomResourceOptions.builder()\n .dependsOn(user)\n .build());\n\n var source = new ConnectionProfile(\"source\", ConnectionProfileArgs.builder()\n .displayName(\"SQL Server Source\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .sqlServerProfile(ConnectionProfileSqlServerProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .port(1433)\n .username(user.name())\n .password(user.password())\n .database(db.name())\n .build())\n .build());\n\n var destination = new ConnectionProfile(\"destination\", ConnectionProfileArgs.builder()\n .displayName(\"BigQuery Destination\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .displayName(\"SQL Server to BigQuery\")\n .location(\"us-central1\")\n .streamId(\"stream\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(source.id())\n .sqlServerSourceConfig(StreamSourceConfigSqlServerSourceConfigArgs.builder()\n .includeObjects(StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs.builder()\n .schemas(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs.builder()\n .schema(\"schema\")\n .tables(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs.builder()\n .table(\"table\")\n .build())\n .build())\n .build())\n .changeTables()\n .build())\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destination.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .dataFreshness(\"900s\")\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .build())\n .build())\n .build())\n .build())\n .backfillNone()\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: sql-server\n databaseVersion: SQLSERVER_2019_STANDARD\n region: us-central1\n rootPassword: root-password\n deletionProtection: 'true'\n settings:\n tier: db-custom-2-4096\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n db:\n type: gcp:sql:Database\n properties:\n name: db\n instance: ${instance.name}\n options:\n dependson:\n - ${user}\n user:\n type: gcp:sql:User\n properties:\n name: user\n instance: ${instance.name}\n password: password\n source:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: SQL Server Source\n location: us-central1\n connectionProfileId: source-profile\n sqlServerProfile:\n hostname: ${instance.publicIpAddress}\n port: 1433\n username: ${user.name}\n password: ${user.password}\n database: ${db.name}\n destination:\n type: gcp:datastream:ConnectionProfile\n properties:\n displayName: BigQuery Destination\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n default:\n type: gcp:datastream:Stream\n properties:\n displayName: SQL Server to BigQuery\n 
location: us-central1\n streamId: stream\n sourceConfig:\n sourceConnectionProfile: ${source.id}\n sqlServerSourceConfig:\n includeObjects:\n schemas:\n - schema: schema\n tables:\n - table: table\n changeTables: {}\n destinationConfig:\n destinationConnectionProfile: ${destination.id}\n bigqueryDestinationConfig:\n dataFreshness: 900s\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n backfillNone: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Postgresql Bigquery Dataset Id\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as random from \"@pulumi/random\";\n\nconst postgres = new gcp.bigquery.Dataset(\"postgres\", {\n datasetId: \"postgres\",\n friendlyName: \"postgres\",\n description: \"Database of postgres\",\n location: \"us-central1\",\n});\nconst destinationConnectionProfile2 = new gcp.datastream.ConnectionProfile(\"destination_connection_profile2\", {\n displayName: \"Connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"dest-profile\",\n bigqueryProfile: {},\n});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"instance-name\",\n databaseVersion: \"MYSQL_8_0\",\n region: \"us-central1\",\n settings: {\n tier: \"db-f1-micro\",\n backupConfiguration: {\n enabled: true,\n binaryLogEnabled: true,\n },\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n deletionProtection: false,\n});\nconst pwd = new random.RandomPassword(\"pwd\", {\n length: 16,\n special: false,\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"my-user\",\n instance: instance.name,\n host: \"%\",\n password: pwd.result,\n});\nconst sourceConnectionProfile = new gcp.datastream.ConnectionProfile(\"source_connection_profile\", {\n displayName: \"Source connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n mysqlProfile: {\n hostname: instance.publicIpAddress,\n username: user.name,\n password: user.password,\n },\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n displayName: \"postgres to bigQuery\",\n location: \"us-central1\",\n streamId: \"postgres-bigquery\",\n sourceConfig: {\n sourceConnectionProfile: sourceConnectionProfile.id,\n mysqlSourceConfig: {},\n },\n destinationConfig: {\n destinationConnectionProfile: destinationConnectionProfile2.id,\n bigqueryDestinationConfig: {\n dataFreshness: \"900s\",\n singleTargetDataset: {\n datasetId: postgres.id,\n },\n },\n },\n backfillAll: {},\n});\nconst db = new gcp.sql.Database(\"db\", {\n instance: instance.name,\n name: \"db\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_random as random\n\npostgres = gcp.bigquery.Dataset(\"postgres\",\n dataset_id=\"postgres\",\n friendly_name=\"postgres\",\n description=\"Database of postgres\",\n location=\"us-central1\")\ndestination_connection_profile2 = gcp.datastream.ConnectionProfile(\"destination_connection_profile2\",\n display_name=\"Connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"dest-profile\",\n bigquery_profile={})\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"instance-name\",\n database_version=\"MYSQL_8_0\",\n region=\"us-central1\",\n settings={\n \"tier\": \"db-f1-micro\",\n 
\"backup_configuration\": {\n \"enabled\": True,\n \"binary_log_enabled\": True,\n },\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n },\n deletion_protection=False)\npwd = random.RandomPassword(\"pwd\",\n length=16,\n special=False)\nuser = gcp.sql.User(\"user\",\n name=\"my-user\",\n instance=instance.name,\n host=\"%\",\n password=pwd.result)\nsource_connection_profile = gcp.datastream.ConnectionProfile(\"source_connection_profile\",\n display_name=\"Source connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n mysql_profile={\n \"hostname\": instance.public_ip_address,\n \"username\": user.name,\n \"password\": user.password,\n })\ndefault = gcp.datastream.Stream(\"default\",\n display_name=\"postgres to bigQuery\",\n location=\"us-central1\",\n stream_id=\"postgres-bigquery\",\n source_config={\n \"source_connection_profile\": source_connection_profile.id,\n \"mysql_source_config\": {},\n },\n destination_config={\n \"destination_connection_profile\": destination_connection_profile2.id,\n \"bigquery_destination_config\": {\n \"data_freshness\": \"900s\",\n \"single_target_dataset\": {\n \"dataset_id\": postgres.id,\n },\n },\n },\n backfill_all={})\ndb = gcp.sql.Database(\"db\",\n instance=instance.name,\n name=\"db\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var postgres = new Gcp.BigQuery.Dataset(\"postgres\", new()\n {\n DatasetId = \"postgres\",\n FriendlyName = \"postgres\",\n Description = \"Database of postgres\",\n Location = \"us-central1\",\n });\n\n var destinationConnectionProfile2 = new Gcp.Datastream.ConnectionProfile(\"destination_connection_profile2\", new()\n {\n DisplayName = \"Connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"dest-profile\",\n BigqueryProfile = null,\n });\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"instance-name\",\n DatabaseVersion = \"MYSQL_8_0\",\n Region = \"us-central1\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n BackupConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsBackupConfigurationArgs\n {\n Enabled = true,\n BinaryLogEnabled = true,\n },\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n DeletionProtection = false,\n });\n\n var pwd = new Random.RandomPassword(\"pwd\", new()\n {\n Length = 16,\n Special = false,\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"my-user\",\n Instance = instance.Name,\n Host = \"%\",\n 
Password = pwd.Result,\n });\n\n var sourceConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"source_connection_profile\", new()\n {\n DisplayName = \"Source connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n MysqlProfile = new Gcp.Datastream.Inputs.ConnectionProfileMysqlProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Username = user.Name,\n Password = user.Password,\n },\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n DisplayName = \"postgres to bigQuery\",\n Location = \"us-central1\",\n StreamId = \"postgres-bigquery\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = sourceConnectionProfile.Id,\n MysqlSourceConfig = null,\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destinationConnectionProfile2.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n DataFreshness = \"900s\",\n SingleTargetDataset = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetArgs\n {\n DatasetId = postgres.Id,\n },\n },\n },\n BackfillAll = null,\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Instance = instance.Name,\n Name = \"db\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpostgres, err := bigquery.NewDataset(ctx, \"postgres\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"postgres\"),\n\t\t\tFriendlyName: pulumi.String(\"postgres\"),\n\t\t\tDescription: pulumi.String(\"Database of postgres\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationConnectionProfile2, err := datastream.NewConnectionProfile(ctx, \"destination_connection_profile2\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"dest-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"instance-name\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_8_0\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-f1-micro\"),\n\t\t\t\tBackupConfiguration: \u0026sql.DatabaseInstanceSettingsBackupConfigurationArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tBinaryLogEnabled: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: 
pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpwd, err := random.NewRandomPassword(ctx, \"pwd\", \u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"my-user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tHost: pulumi.String(\"%\"),\n\t\t\tPassword: pwd.Result,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsourceConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"source_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Source connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tMysqlProfile: \u0026datastream.ConnectionProfileMysqlProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tDisplayName: pulumi.String(\"postgres to bigQuery\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tStreamId: pulumi.String(\"postgres-bigquery\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: sourceConnectionProfile.ID(),\n\t\t\t\tMysqlSourceConfig: nil,\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destinationConnectionProfile2.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tDataFreshness: pulumi.String(\"900s\"),\n\t\t\t\t\tSingleTargetDataset: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetArgs{\n\t\t\t\t\t\tDatasetId: postgres.ID(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillAll: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tInstance: instance.Name,\n\t\t\tName: pulumi.String(\"db\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsBackupConfigurationArgs;\nimport 
com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileMysqlProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillAllArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var postgres = new Dataset(\"postgres\", DatasetArgs.builder()\n .datasetId(\"postgres\")\n .friendlyName(\"postgres\")\n .description(\"Database of postgres\")\n .location(\"us-central1\")\n .build());\n\n var destinationConnectionProfile2 = new ConnectionProfile(\"destinationConnectionProfile2\", ConnectionProfileArgs.builder()\n .displayName(\"Connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"dest-profile\")\n .bigqueryProfile()\n .build());\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"instance-name\")\n .databaseVersion(\"MYSQL_8_0\")\n .region(\"us-central1\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .backupConfiguration(DatabaseInstanceSettingsBackupConfigurationArgs.builder()\n .enabled(true)\n .binaryLogEnabled(true)\n .build())\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .deletionProtection(false)\n .build());\n\n var pwd = new RandomPassword(\"pwd\", RandomPasswordArgs.builder()\n .length(16)\n .special(false)\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"my-user\")\n .instance(instance.name())\n .host(\"%\")\n .password(pwd.result())\n .build());\n\n var sourceConnectionProfile = new ConnectionProfile(\"sourceConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Source connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .mysqlProfile(ConnectionProfileMysqlProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .username(user.name())\n .password(user.password())\n .build())\n .build());\n\n var default_ = new 
Stream(\"default\", StreamArgs.builder()\n .displayName(\"postgres to bigQuery\")\n .location(\"us-central1\")\n .streamId(\"postgres-bigquery\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(sourceConnectionProfile.id())\n .mysqlSourceConfig()\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destinationConnectionProfile2.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .dataFreshness(\"900s\")\n .singleTargetDataset(StreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetArgs.builder()\n .datasetId(postgres.id())\n .build())\n .build())\n .build())\n .backfillAll()\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .instance(instance.name())\n .name(\"db\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n postgres:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: postgres\n friendlyName: postgres\n description: Database of postgres\n location: us-central1\n default:\n type: gcp:datastream:Stream\n properties:\n displayName: postgres to bigQuery\n location: us-central1\n streamId: postgres-bigquery\n sourceConfig:\n sourceConnectionProfile: ${sourceConnectionProfile.id}\n mysqlSourceConfig: {}\n destinationConfig:\n destinationConnectionProfile: ${destinationConnectionProfile2.id}\n bigqueryDestinationConfig:\n dataFreshness: 900s\n singleTargetDataset:\n datasetId: ${postgres.id}\n backfillAll: {}\n destinationConnectionProfile2:\n type: gcp:datastream:ConnectionProfile\n name: destination_connection_profile2\n properties:\n displayName: Connection profile\n location: us-central1\n connectionProfileId: dest-profile\n bigqueryProfile: {}\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: instance-name\n databaseVersion: MYSQL_8_0\n region: us-central1\n settings:\n tier: db-f1-micro\n backupConfiguration:\n enabled: true\n binaryLogEnabled: true\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n deletionProtection: false\n db:\n type: gcp:sql:Database\n properties:\n instance: ${instance.name}\n name: db\n pwd:\n type: random:RandomPassword\n properties:\n length: 16\n special: false\n user:\n type: gcp:sql:User\n properties:\n name: my-user\n instance: ${instance.name}\n host: '%'\n password: ${pwd.result}\n sourceConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: source_connection_profile\n properties:\n displayName: Source connection profile\n location: us-central1\n connectionProfileId: source-profile\n mysqlProfile:\n hostname: ${instance.publicIpAddress}\n username: ${user.name}\n password: ${user.password}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Bigquery\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as random from \"@pulumi/random\";\n\nconst project = gcp.organizations.getProject({});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"my-instance\",\n databaseVersion: \"MYSQL_8_0\",\n region: \"us-central1\",\n settings: {\n tier: \"db-f1-micro\",\n backupConfiguration: {\n enabled: true,\n binaryLogEnabled: true,\n },\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n 
value: \"34.72.239.218\",\n },\n ],\n },\n },\n deletionProtection: true,\n});\nconst db = new gcp.sql.Database(\"db\", {\n instance: instance.name,\n name: \"db\",\n});\nconst pwd = new random.RandomPassword(\"pwd\", {\n length: 16,\n special: false,\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"user\",\n instance: instance.name,\n host: \"%\",\n password: pwd.result,\n});\nconst sourceConnectionProfile = new gcp.datastream.ConnectionProfile(\"source_connection_profile\", {\n displayName: \"Source connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n mysqlProfile: {\n hostname: instance.publicIpAddress,\n username: user.name,\n password: user.password,\n },\n});\nconst bqSa = gcp.bigquery.getDefaultServiceAccount({});\nconst bigqueryKeyUser = new gcp.kms.CryptoKeyIAMMember(\"bigquery_key_user\", {\n cryptoKeyId: \"bigquery-kms-name\",\n role: \"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n member: bqSa.then(bqSa =\u003e `serviceAccount:${bqSa.email}`),\n});\nconst destinationConnectionProfile = new gcp.datastream.ConnectionProfile(\"destination_connection_profile\", {\n displayName: \"Connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n streamId: \"my-stream\",\n location: \"us-central1\",\n displayName: \"my stream\",\n sourceConfig: {\n sourceConnectionProfile: sourceConnectionProfile.id,\n mysqlSourceConfig: {},\n },\n destinationConfig: {\n destinationConnectionProfile: destinationConnectionProfile.id,\n bigqueryDestinationConfig: {\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n kmsKeyName: \"bigquery-kms-name\",\n },\n },\n },\n },\n backfillNone: {},\n}, {\n dependsOn: [bigqueryKeyUser],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_random as random\n\nproject = gcp.organizations.get_project()\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"my-instance\",\n database_version=\"MYSQL_8_0\",\n region=\"us-central1\",\n settings={\n \"tier\": \"db-f1-micro\",\n \"backup_configuration\": {\n \"enabled\": True,\n \"binary_log_enabled\": True,\n },\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": \"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n },\n deletion_protection=True)\ndb = gcp.sql.Database(\"db\",\n instance=instance.name,\n name=\"db\")\npwd = random.RandomPassword(\"pwd\",\n length=16,\n special=False)\nuser = gcp.sql.User(\"user\",\n name=\"user\",\n instance=instance.name,\n host=\"%\",\n password=pwd.result)\nsource_connection_profile = gcp.datastream.ConnectionProfile(\"source_connection_profile\",\n display_name=\"Source connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n mysql_profile={\n \"hostname\": instance.public_ip_address,\n \"username\": user.name,\n \"password\": user.password,\n })\nbq_sa = gcp.bigquery.get_default_service_account()\nbigquery_key_user = gcp.kms.CryptoKeyIAMMember(\"bigquery_key_user\",\n crypto_key_id=\"bigquery-kms-name\",\n role=\"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n member=f\"serviceAccount:{bq_sa.email}\")\ndestination_connection_profile = gcp.datastream.ConnectionProfile(\"destination_connection_profile\",\n display_name=\"Connection profile\",\n 
location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\ndefault = gcp.datastream.Stream(\"default\",\n stream_id=\"my-stream\",\n location=\"us-central1\",\n display_name=\"my stream\",\n source_config={\n \"source_connection_profile\": source_connection_profile.id,\n \"mysql_source_config\": {},\n },\n destination_config={\n \"destination_connection_profile\": destination_connection_profile.id,\n \"bigquery_destination_config\": {\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n \"kms_key_name\": \"bigquery-kms-name\",\n },\n },\n },\n },\n backfill_none={},\n opts = pulumi.ResourceOptions(depends_on=[bigquery_key_user]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"my-instance\",\n DatabaseVersion = \"MYSQL_8_0\",\n Region = \"us-central1\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n BackupConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsBackupConfigurationArgs\n {\n Enabled = true,\n BinaryLogEnabled = true,\n },\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n DeletionProtection = true,\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Instance = instance.Name,\n Name = \"db\",\n });\n\n var pwd = new Random.RandomPassword(\"pwd\", new()\n {\n Length = 16,\n Special = false,\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"user\",\n Instance = instance.Name,\n Host = \"%\",\n Password = pwd.Result,\n });\n\n var sourceConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"source_connection_profile\", new()\n {\n DisplayName = \"Source connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n MysqlProfile = new Gcp.Datastream.Inputs.ConnectionProfileMysqlProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Username = user.Name,\n Password = user.Password,\n },\n });\n\n var bqSa = Gcp.BigQuery.GetDefaultServiceAccount.Invoke();\n\n var bigqueryKeyUser = new Gcp.Kms.CryptoKeyIAMMember(\"bigquery_key_user\", new()\n {\n CryptoKeyId = \"bigquery-kms-name\",\n Role = \"roles/cloudkms.cryptoKeyEncrypterDecrypter\",\n Member = $\"serviceAccount:{bqSa.Apply(getDefaultServiceAccountResult =\u003e getDefaultServiceAccountResult.Email)}\",\n });\n\n var destinationConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"destination_connection_profile\", new()\n {\n DisplayName = \"Connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var 
@default = new Gcp.Datastream.Stream(\"default\", new()\n {\n StreamId = \"my-stream\",\n Location = \"us-central1\",\n DisplayName = \"my stream\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = sourceConnectionProfile.Id,\n MysqlSourceConfig = null,\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destinationConnectionProfile.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n KmsKeyName = \"bigquery-kms-name\",\n },\n },\n },\n },\n BackfillNone = null,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n bigqueryKeyUser,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-instance\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_8_0\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-f1-micro\"),\n\t\t\t\tBackupConfiguration: \u0026sql.DatabaseInstanceSettingsBackupConfigurationArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tBinaryLogEnabled: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tInstance: instance.Name,\n\t\t\tName: pulumi.String(\"db\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpwd, err := random.NewRandomPassword(ctx, \"pwd\", 
\u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tHost: pulumi.String(\"%\"),\n\t\t\tPassword: pwd.Result,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsourceConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"source_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Source connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tMysqlProfile: \u0026datastream.ConnectionProfileMysqlProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbqSa, err := bigquery.GetDefaultServiceAccount(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbigqueryKeyUser, err := kms.NewCryptoKeyIAMMember(ctx, \"bigquery_key_user\", \u0026kms.CryptoKeyIAMMemberArgs{\n\t\t\tCryptoKeyId: pulumi.String(\"bigquery-kms-name\"),\n\t\t\tRole: pulumi.String(\"roles/cloudkms.cryptoKeyEncrypterDecrypter\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:%v\", bqSa.Email),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"destination_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDisplayName: pulumi.String(\"my stream\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: sourceConnectionProfile.ID(),\n\t\t\t\tMysqlSourceConfig: nil,\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destinationConnectionProfile.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t\tKmsKeyName: pulumi.String(\"bigquery-kms-name\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillNone: nil,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tbigqueryKeyUser,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport 
com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsBackupConfigurationArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileMysqlProfileArgs;\nimport com.pulumi.gcp.bigquery.BigqueryFunctions;\nimport com.pulumi.gcp.bigquery.inputs.GetDefaultServiceAccountArgs;\nimport com.pulumi.gcp.kms.CryptoKeyIAMMember;\nimport com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillNoneArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"my-instance\")\n .databaseVersion(\"MYSQL_8_0\")\n .region(\"us-central1\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .backupConfiguration(DatabaseInstanceSettingsBackupConfigurationArgs.builder()\n .enabled(true)\n .binaryLogEnabled(true)\n .build())\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .deletionProtection(true)\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .instance(instance.name())\n .name(\"db\")\n .build());\n\n var pwd = new RandomPassword(\"pwd\", RandomPasswordArgs.builder()\n .length(16)\n .special(false)\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"user\")\n .instance(instance.name())\n .host(\"%\")\n .password(pwd.result())\n .build());\n\n 
var sourceConnectionProfile = new ConnectionProfile(\"sourceConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Source connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .mysqlProfile(ConnectionProfileMysqlProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .username(user.name())\n .password(user.password())\n .build())\n .build());\n\n final var bqSa = BigqueryFunctions.getDefaultServiceAccount();\n\n var bigqueryKeyUser = new CryptoKeyIAMMember(\"bigqueryKeyUser\", CryptoKeyIAMMemberArgs.builder()\n .cryptoKeyId(\"bigquery-kms-name\")\n .role(\"roles/cloudkms.cryptoKeyEncrypterDecrypter\")\n .member(String.format(\"serviceAccount:%s\", bqSa.applyValue(getDefaultServiceAccountResult -\u003e getDefaultServiceAccountResult.email())))\n .build());\n\n var destinationConnectionProfile = new ConnectionProfile(\"destinationConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .streamId(\"my-stream\")\n .location(\"us-central1\")\n .displayName(\"my stream\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(sourceConnectionProfile.id())\n .mysqlSourceConfig()\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destinationConnectionProfile.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .kmsKeyName(\"bigquery-kms-name\")\n .build())\n .build())\n .build())\n .build())\n .backfillNone()\n .build(), CustomResourceOptions.builder()\n .dependsOn(bigqueryKeyUser)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-instance\n databaseVersion: MYSQL_8_0\n region: us-central1\n settings:\n tier: db-f1-micro\n backupConfiguration:\n enabled: true\n binaryLogEnabled: true\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n deletionProtection: true\n db:\n type: gcp:sql:Database\n properties:\n instance: ${instance.name}\n name: db\n pwd:\n type: random:RandomPassword\n properties:\n length: 16\n special: false\n user:\n type: gcp:sql:User\n properties:\n name: user\n instance: ${instance.name}\n host: '%'\n password: ${pwd.result}\n sourceConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: source_connection_profile\n properties:\n displayName: Source connection profile\n location: us-central1\n connectionProfileId: source-profile\n mysqlProfile:\n hostname: ${instance.publicIpAddress}\n username: ${user.name}\n password: ${user.password}\n bigqueryKeyUser:\n type: gcp:kms:CryptoKeyIAMMember\n name: bigquery_key_user\n properties:\n cryptoKeyId: bigquery-kms-name\n role: roles/cloudkms.cryptoKeyEncrypterDecrypter\n member: serviceAccount:${bqSa.email}\n destinationConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: destination_connection_profile\n properties:\n displayName: Connection profile\n location: us-central1\n 
connectionProfileId: destination-profile\n bigqueryProfile: {}\n default:\n type: gcp:datastream:Stream\n properties:\n streamId: my-stream\n location: us-central1\n displayName: my stream\n sourceConfig:\n sourceConnectionProfile: ${sourceConnectionProfile.id}\n mysqlSourceConfig: {}\n destinationConfig:\n destinationConnectionProfile: ${destinationConnectionProfile.id}\n bigqueryDestinationConfig:\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n kmsKeyName: bigquery-kms-name\n backfillNone: {}\n options:\n dependson:\n - ${bigqueryKeyUser}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n bqSa:\n fn::invoke:\n Function: gcp:bigquery:getDefaultServiceAccount\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Datastream Stream Bigquery Append Only\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as random from \"@pulumi/random\";\n\nconst project = gcp.organizations.getProject({});\nconst instance = new gcp.sql.DatabaseInstance(\"instance\", {\n name: \"my-instance\",\n databaseVersion: \"MYSQL_8_0\",\n region: \"us-central1\",\n settings: {\n tier: \"db-f1-micro\",\n backupConfiguration: {\n enabled: true,\n binaryLogEnabled: true,\n },\n ipConfiguration: {\n authorizedNetworks: [\n {\n value: \"34.71.242.81\",\n },\n {\n value: \"34.72.28.29\",\n },\n {\n value: \"34.67.6.157\",\n },\n {\n value: \"34.67.234.134\",\n },\n {\n value: \"34.72.239.218\",\n },\n ],\n },\n },\n deletionProtection: true,\n});\nconst db = new gcp.sql.Database(\"db\", {\n instance: instance.name,\n name: \"db\",\n});\nconst pwd = new random.RandomPassword(\"pwd\", {\n length: 16,\n special: false,\n});\nconst user = new gcp.sql.User(\"user\", {\n name: \"user\",\n instance: instance.name,\n host: \"%\",\n password: pwd.result,\n});\nconst sourceConnectionProfile = new gcp.datastream.ConnectionProfile(\"source_connection_profile\", {\n displayName: \"Source connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"source-profile\",\n mysqlProfile: {\n hostname: instance.publicIpAddress,\n username: user.name,\n password: user.password,\n },\n});\nconst destinationConnectionProfile = new gcp.datastream.ConnectionProfile(\"destination_connection_profile\", {\n displayName: \"Connection profile\",\n location: \"us-central1\",\n connectionProfileId: \"destination-profile\",\n bigqueryProfile: {},\n});\nconst _default = new gcp.datastream.Stream(\"default\", {\n streamId: \"my-stream\",\n location: \"us-central1\",\n displayName: \"my stream\",\n sourceConfig: {\n sourceConnectionProfile: sourceConnectionProfile.id,\n mysqlSourceConfig: {},\n },\n destinationConfig: {\n destinationConnectionProfile: destinationConnectionProfile.id,\n bigqueryDestinationConfig: {\n sourceHierarchyDatasets: {\n datasetTemplate: {\n location: \"us-central1\",\n },\n },\n appendOnly: {},\n },\n },\n backfillNone: {},\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_random as random\n\nproject = gcp.organizations.get_project()\ninstance = gcp.sql.DatabaseInstance(\"instance\",\n name=\"my-instance\",\n database_version=\"MYSQL_8_0\",\n region=\"us-central1\",\n settings={\n \"tier\": \"db-f1-micro\",\n \"backup_configuration\": {\n \"enabled\": True,\n \"binary_log_enabled\": True,\n },\n \"ip_configuration\": {\n \"authorized_networks\": [\n {\n \"value\": \"34.71.242.81\",\n },\n {\n \"value\": 
\"34.72.28.29\",\n },\n {\n \"value\": \"34.67.6.157\",\n },\n {\n \"value\": \"34.67.234.134\",\n },\n {\n \"value\": \"34.72.239.218\",\n },\n ],\n },\n },\n deletion_protection=True)\ndb = gcp.sql.Database(\"db\",\n instance=instance.name,\n name=\"db\")\npwd = random.RandomPassword(\"pwd\",\n length=16,\n special=False)\nuser = gcp.sql.User(\"user\",\n name=\"user\",\n instance=instance.name,\n host=\"%\",\n password=pwd.result)\nsource_connection_profile = gcp.datastream.ConnectionProfile(\"source_connection_profile\",\n display_name=\"Source connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"source-profile\",\n mysql_profile={\n \"hostname\": instance.public_ip_address,\n \"username\": user.name,\n \"password\": user.password,\n })\ndestination_connection_profile = gcp.datastream.ConnectionProfile(\"destination_connection_profile\",\n display_name=\"Connection profile\",\n location=\"us-central1\",\n connection_profile_id=\"destination-profile\",\n bigquery_profile={})\ndefault = gcp.datastream.Stream(\"default\",\n stream_id=\"my-stream\",\n location=\"us-central1\",\n display_name=\"my stream\",\n source_config={\n \"source_connection_profile\": source_connection_profile.id,\n \"mysql_source_config\": {},\n },\n destination_config={\n \"destination_connection_profile\": destination_connection_profile.id,\n \"bigquery_destination_config\": {\n \"source_hierarchy_datasets\": {\n \"dataset_template\": {\n \"location\": \"us-central1\",\n },\n },\n \"append_only\": {},\n },\n },\n backfill_none={})\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Random = Pulumi.Random;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var instance = new Gcp.Sql.DatabaseInstance(\"instance\", new()\n {\n Name = \"my-instance\",\n DatabaseVersion = \"MYSQL_8_0\",\n Region = \"us-central1\",\n Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs\n {\n Tier = \"db-f1-micro\",\n BackupConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsBackupConfigurationArgs\n {\n Enabled = true,\n BinaryLogEnabled = true,\n },\n IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs\n {\n AuthorizedNetworks = new[]\n {\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.71.242.81\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.28.29\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.6.157\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.67.234.134\",\n },\n new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs\n {\n Value = \"34.72.239.218\",\n },\n },\n },\n },\n DeletionProtection = true,\n });\n\n var db = new Gcp.Sql.Database(\"db\", new()\n {\n Instance = instance.Name,\n Name = \"db\",\n });\n\n var pwd = new Random.RandomPassword(\"pwd\", new()\n {\n Length = 16,\n Special = false,\n });\n\n var user = new Gcp.Sql.User(\"user\", new()\n {\n Name = \"user\",\n Instance = instance.Name,\n Host = \"%\",\n Password = pwd.Result,\n });\n\n var sourceConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"source_connection_profile\", new()\n {\n DisplayName = \"Source connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"source-profile\",\n 
MysqlProfile = new Gcp.Datastream.Inputs.ConnectionProfileMysqlProfileArgs\n {\n Hostname = instance.PublicIpAddress,\n Username = user.Name,\n Password = user.Password,\n },\n });\n\n var destinationConnectionProfile = new Gcp.Datastream.ConnectionProfile(\"destination_connection_profile\", new()\n {\n DisplayName = \"Connection profile\",\n Location = \"us-central1\",\n ConnectionProfileId = \"destination-profile\",\n BigqueryProfile = null,\n });\n\n var @default = new Gcp.Datastream.Stream(\"default\", new()\n {\n StreamId = \"my-stream\",\n Location = \"us-central1\",\n DisplayName = \"my stream\",\n SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs\n {\n SourceConnectionProfile = sourceConnectionProfile.Id,\n MysqlSourceConfig = null,\n },\n DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs\n {\n DestinationConnectionProfile = destinationConnectionProfile.Id,\n BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs\n {\n SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs\n {\n DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs\n {\n Location = \"us-central1\",\n },\n },\n AppendOnly = null,\n },\n },\n BackfillNone = null,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql\"\n\t\"github.com/pulumi/pulumi-random/sdk/v4/go/random\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := sql.NewDatabaseInstance(ctx, \"instance\", \u0026sql.DatabaseInstanceArgs{\n\t\t\tName: pulumi.String(\"my-instance\"),\n\t\t\tDatabaseVersion: pulumi.String(\"MYSQL_8_0\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tSettings: \u0026sql.DatabaseInstanceSettingsArgs{\n\t\t\t\tTier: pulumi.String(\"db-f1-micro\"),\n\t\t\t\tBackupConfiguration: \u0026sql.DatabaseInstanceSettingsBackupConfigurationArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tBinaryLogEnabled: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tIpConfiguration: \u0026sql.DatabaseInstanceSettingsIpConfigurationArgs{\n\t\t\t\t\tAuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.71.242.81\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.28.29\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.6.157\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.67.234.134\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{\n\t\t\t\t\t\t\tValue: pulumi.String(\"34.72.239.218\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDeletionProtection: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = 
sql.NewDatabase(ctx, \"db\", \u0026sql.DatabaseArgs{\n\t\t\tInstance: instance.Name,\n\t\t\tName: pulumi.String(\"db\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpwd, err := random.NewRandomPassword(ctx, \"pwd\", \u0026random.RandomPasswordArgs{\n\t\t\tLength: pulumi.Int(16),\n\t\t\tSpecial: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tuser, err := sql.NewUser(ctx, \"user\", \u0026sql.UserArgs{\n\t\t\tName: pulumi.String(\"user\"),\n\t\t\tInstance: instance.Name,\n\t\t\tHost: pulumi.String(\"%\"),\n\t\t\tPassword: pwd.Result,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsourceConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"source_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Source connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"source-profile\"),\n\t\t\tMysqlProfile: \u0026datastream.ConnectionProfileMysqlProfileArgs{\n\t\t\t\tHostname: instance.PublicIpAddress,\n\t\t\t\tUsername: user.Name,\n\t\t\t\tPassword: user.Password,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdestinationConnectionProfile, err := datastream.NewConnectionProfile(ctx, \"destination_connection_profile\", \u0026datastream.ConnectionProfileArgs{\n\t\t\tDisplayName: pulumi.String(\"Connection profile\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tConnectionProfileId: pulumi.String(\"destination-profile\"),\n\t\t\tBigqueryProfile: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = datastream.NewStream(ctx, \"default\", \u0026datastream.StreamArgs{\n\t\t\tStreamId: pulumi.String(\"my-stream\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDisplayName: pulumi.String(\"my stream\"),\n\t\t\tSourceConfig: \u0026datastream.StreamSourceConfigArgs{\n\t\t\t\tSourceConnectionProfile: sourceConnectionProfile.ID(),\n\t\t\t\tMysqlSourceConfig: nil,\n\t\t\t},\n\t\t\tDestinationConfig: \u0026datastream.StreamDestinationConfigArgs{\n\t\t\t\tDestinationConnectionProfile: destinationConnectionProfile.ID(),\n\t\t\t\tBigqueryDestinationConfig: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{\n\t\t\t\t\tSourceHierarchyDatasets: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{\n\t\t\t\t\t\tDatasetTemplate: \u0026datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{\n\t\t\t\t\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tAppendOnly: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tBackfillNone: nil,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.sql.DatabaseInstance;\nimport com.pulumi.gcp.sql.DatabaseInstanceArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsBackupConfigurationArgs;\nimport com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;\nimport com.pulumi.gcp.sql.Database;\nimport com.pulumi.gcp.sql.DatabaseArgs;\nimport com.pulumi.random.RandomPassword;\nimport com.pulumi.random.RandomPasswordArgs;\nimport 
com.pulumi.gcp.sql.User;\nimport com.pulumi.gcp.sql.UserArgs;\nimport com.pulumi.gcp.datastream.ConnectionProfile;\nimport com.pulumi.gcp.datastream.ConnectionProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileMysqlProfileArgs;\nimport com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;\nimport com.pulumi.gcp.datastream.Stream;\nimport com.pulumi.gcp.datastream.StreamArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamSourceConfigMysqlSourceConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigAppendOnlyArgs;\nimport com.pulumi.gcp.datastream.inputs.StreamBackfillNoneArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var instance = new DatabaseInstance(\"instance\", DatabaseInstanceArgs.builder()\n .name(\"my-instance\")\n .databaseVersion(\"MYSQL_8_0\")\n .region(\"us-central1\")\n .settings(DatabaseInstanceSettingsArgs.builder()\n .tier(\"db-f1-micro\")\n .backupConfiguration(DatabaseInstanceSettingsBackupConfigurationArgs.builder()\n .enabled(true)\n .binaryLogEnabled(true)\n .build())\n .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()\n .authorizedNetworks( \n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.71.242.81\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.28.29\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.6.157\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.67.234.134\")\n .build(),\n DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()\n .value(\"34.72.239.218\")\n .build())\n .build())\n .build())\n .deletionProtection(true)\n .build());\n\n var db = new Database(\"db\", DatabaseArgs.builder()\n .instance(instance.name())\n .name(\"db\")\n .build());\n\n var pwd = new RandomPassword(\"pwd\", RandomPasswordArgs.builder()\n .length(16)\n .special(false)\n .build());\n\n var user = new User(\"user\", UserArgs.builder()\n .name(\"user\")\n .instance(instance.name())\n .host(\"%\")\n .password(pwd.result())\n .build());\n\n var sourceConnectionProfile = new ConnectionProfile(\"sourceConnectionProfile\", ConnectionProfileArgs.builder()\n .displayName(\"Source connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"source-profile\")\n .mysqlProfile(ConnectionProfileMysqlProfileArgs.builder()\n .hostname(instance.publicIpAddress())\n .username(user.name())\n .password(user.password())\n .build())\n .build());\n\n var destinationConnectionProfile = new ConnectionProfile(\"destinationConnectionProfile\", ConnectionProfileArgs.builder()\n 
.displayName(\"Connection profile\")\n .location(\"us-central1\")\n .connectionProfileId(\"destination-profile\")\n .bigqueryProfile()\n .build());\n\n var default_ = new Stream(\"default\", StreamArgs.builder()\n .streamId(\"my-stream\")\n .location(\"us-central1\")\n .displayName(\"my stream\")\n .sourceConfig(StreamSourceConfigArgs.builder()\n .sourceConnectionProfile(sourceConnectionProfile.id())\n .mysqlSourceConfig()\n .build())\n .destinationConfig(StreamDestinationConfigArgs.builder()\n .destinationConnectionProfile(destinationConnectionProfile.id())\n .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder()\n .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder()\n .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder()\n .location(\"us-central1\")\n .build())\n .build())\n .appendOnly()\n .build())\n .build())\n .backfillNone()\n .build());\n\n }\n}\n```\n```yaml\nresources:\n instance:\n type: gcp:sql:DatabaseInstance\n properties:\n name: my-instance\n databaseVersion: MYSQL_8_0\n region: us-central1\n settings:\n tier: db-f1-micro\n backupConfiguration:\n enabled: true\n binaryLogEnabled: true\n ipConfiguration:\n authorizedNetworks:\n - value: 34.71.242.81\n - value: 34.72.28.29\n - value: 34.67.6.157\n - value: 34.67.234.134\n - value: 34.72.239.218\n deletionProtection: true\n db:\n type: gcp:sql:Database\n properties:\n instance: ${instance.name}\n name: db\n pwd:\n type: random:RandomPassword\n properties:\n length: 16\n special: false\n user:\n type: gcp:sql:User\n properties:\n name: user\n instance: ${instance.name}\n host: '%'\n password: ${pwd.result}\n sourceConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: source_connection_profile\n properties:\n displayName: Source connection profile\n location: us-central1\n connectionProfileId: source-profile\n mysqlProfile:\n hostname: ${instance.publicIpAddress}\n username: ${user.name}\n password: ${user.password}\n destinationConnectionProfile:\n type: gcp:datastream:ConnectionProfile\n name: destination_connection_profile\n properties:\n displayName: Connection profile\n location: us-central1\n connectionProfileId: destination-profile\n bigqueryProfile: {}\n default:\n type: gcp:datastream:Stream\n properties:\n streamId: my-stream\n location: us-central1\n displayName: my stream\n sourceConfig:\n sourceConnectionProfile: ${sourceConnectionProfile.id}\n mysqlSourceConfig: {}\n destinationConfig:\n destinationConnectionProfile: ${destinationConnectionProfile.id}\n bigqueryDestinationConfig:\n sourceHierarchyDatasets:\n datasetTemplate:\n location: us-central1\n appendOnly: {}\n backfillNone: {}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nStream can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/streams/{{stream_id}}`\n\n* `{{project}}/{{location}}/{{stream_id}}`\n\n* `{{location}}/{{stream_id}}`\n\nWhen using the `pulumi import` command, Stream can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:datastream/stream:Stream default projects/{{project}}/locations/{{location}}/streams/{{stream_id}}\n```\n\n```sh\n$ pulumi import gcp:datastream/stream:Stream default {{project}}/{{location}}/{{stream_id}}\n```\n\n```sh\n$ pulumi import gcp:datastream/stream:Stream default {{location}}/{{stream_id}}\n```\n\n", "properties": { "backfillAll": { "$ref": "#/types/gcp:datastream/StreamBackfillAll:StreamBackfillAll", @@ -185233,7 +186801,7 @@ }, "desiredState": { "type": "string", - "description": "Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.\n" + "description": "Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible\nvalues: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED\n" }, "destinationConfig": { "$ref": "#/types/gcp:datastream/StreamDestinationConfig:StreamDestinationConfig", @@ -185323,7 +186891,7 @@ }, "desiredState": { "type": "string", - "description": "Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.\n" + "description": "Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible\nvalues: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED\n" }, "destinationConfig": { "$ref": "#/types/gcp:datastream/StreamDestinationConfig:StreamDestinationConfig", @@ -185389,7 +186957,7 @@ }, "desiredState": { "type": "string", - "description": "Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.\n" + "description": "Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible\nvalues: NOT_STARTED, RUNNING, PAUSED. 
Default: NOT_STARTED\n" }, "destinationConfig": { "$ref": "#/types/gcp:datastream/StreamDestinationConfig:StreamDestinationConfig", @@ -188127,7 +189695,7 @@ }, "industryVertical": { "type": "string", - "description": "The industry vertical that the data store registers.\nPossible values are: `GENERIC`, `MEDIA`.\n" + "description": "The industry vertical that the data store registers.\nPossible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.\n" }, "location": { "type": "string", @@ -188150,7 +189718,7 @@ "items": { "type": "string" }, - "description": "The solutions that the data store enrolls.\nEach value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.\n" + "description": "The solutions that the data store enrolls.\nEach value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.\n" } }, "required": [ @@ -188190,7 +189758,7 @@ }, "industryVertical": { "type": "string", - "description": "The industry vertical that the data store registers.\nPossible values are: `GENERIC`, `MEDIA`.\n", + "description": "The industry vertical that the data store registers.\nPossible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.\n", "willReplaceOnChanges": true }, "location": { @@ -188212,7 +189780,7 @@ "items": { "type": "string" }, - "description": "The solutions that the data store enrolls.\nEach value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.\n", + "description": "The solutions that the data store enrolls.\nEach value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.\n", "willReplaceOnChanges": true } }, @@ -188259,7 +189827,7 @@ }, "industryVertical": { "type": "string", - "description": "The industry vertical that the data store registers.\nPossible values are: `GENERIC`, `MEDIA`.\n", + "description": "The industry vertical that the data store registers.\nPossible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.\n", "willReplaceOnChanges": true }, "location": { @@ -188285,7 +189853,7 @@ "items": { "type": "string" }, - "description": "The solutions that the data store enrolls.\nEach value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.\n", + "description": "The solutions that the data store enrolls.\nEach value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.\n", "willReplaceOnChanges": true } }, @@ -193944,7 +195512,7 @@ }, "desiredState": { "type": "string", - "description": "The intended database state.\n" + "description": "The intended database state. Possible values: ACTIVE, DISABLED.\n" }, "instanceId": { "type": "string", @@ -193982,7 +195550,7 @@ "inputProperties": { "desiredState": { "type": "string", - "description": "The intended database state.\n" + "description": "The intended database state. Possible values: ACTIVE, DISABLED.\n" }, "instanceId": { "type": "string", @@ -194018,7 +195586,7 @@ }, "desiredState": { "type": "string", - "description": "The intended database state.\n" + "description": "The intended database state. Possible values: ACTIVE, DISABLED.\n" }, "instanceId": { "type": "string", @@ -198067,7 +199635,7 @@ } }, "gcp:gkehub/featureMembership:FeatureMembership": { - "description": "Contains information about a GKEHub Feature Memberships. 
Feature Memberships configure GKEHub Features that apply to specific memberships rather than the project as a whole. The google_gke_hub is the Fleet API.\n\n## Example Usage\n\n### Config Management\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"configmanagement\",\n location: \"global\",\n labels: {\n foo: \"bar\",\n },\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n configmanagement: {\n version: \"1.6.2\",\n configSync: {\n git: {\n syncRepo: \"https://github.com/hashicorp/terraform\",\n },\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"configmanagement\",\n location=\"global\",\n labels={\n \"foo\": \"bar\",\n })\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n configmanagement={\n \"version\": \"1.6.2\",\n \"config_sync\": {\n \"git\": {\n \"sync_repo\": \"https://github.com/hashicorp/terraform\",\n },\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"configmanagement\",\n Location = \"global\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Configmanagement = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementArgs\n {\n Version = \"1.6.2\",\n ConfigSync = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncArgs\n {\n Git = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs\n {\n SyncRepo = \"https://github.com/hashicorp/terraform\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"configmanagement\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tConfigmanagement: \u0026gkehub.FeatureMembershipConfigmanagementArgs{\n\t\t\t\tVersion: pulumi.String(\"1.6.2\"),\n\t\t\t\tConfigSync: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncArgs{\n\t\t\t\t\tGit: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncGitArgs{\n\t\t\t\t\t\tSyncRepo: pulumi.String(\"https://github.com/hashicorp/terraform\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n 
.endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"configmanagement\")\n .location(\"global\")\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .configmanagement(FeatureMembershipConfigmanagementArgs.builder()\n .version(\"1.6.2\")\n .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()\n .git(FeatureMembershipConfigmanagementConfigSyncGitArgs.builder()\n .syncRepo(\"https://github.com/hashicorp/terraform\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: configmanagement\n location: global\n labels:\n foo: bar\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n configmanagement:\n version: 1.6.2\n configSync:\n git:\n syncRepo: https://github.com/hashicorp/terraform\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Config Management With OCI\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"configmanagement\",\n location: \"global\",\n labels: {\n foo: \"bar\",\n },\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n configmanagement: {\n version: \"1.15.1\",\n configSync: {\n oci: {\n syncRepo: \"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\",\n policyDir: \"config-connector\",\n syncWaitSecs: \"20\",\n secretType: \"gcpserviceaccount\",\n gcpServiceAccountEmail: \"sa@project-id.iam.gserviceaccount.com\",\n },\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"configmanagement\",\n location=\"global\",\n labels={\n \"foo\": \"bar\",\n })\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n 
feature=feature.name,\n membership=membership.membership_id,\n configmanagement={\n \"version\": \"1.15.1\",\n \"config_sync\": {\n \"oci\": {\n \"sync_repo\": \"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\",\n \"policy_dir\": \"config-connector\",\n \"sync_wait_secs\": \"20\",\n \"secret_type\": \"gcpserviceaccount\",\n \"gcp_service_account_email\": \"sa@project-id.iam.gserviceaccount.com\",\n },\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"configmanagement\",\n Location = \"global\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Configmanagement = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementArgs\n {\n Version = \"1.15.1\",\n ConfigSync = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncArgs\n {\n Oci = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncOciArgs\n {\n SyncRepo = \"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\",\n PolicyDir = \"config-connector\",\n SyncWaitSecs = \"20\",\n SecretType = \"gcpserviceaccount\",\n GcpServiceAccountEmail = \"sa@project-id.iam.gserviceaccount.com\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"configmanagement\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", 
\u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tConfigmanagement: \u0026gkehub.FeatureMembershipConfigmanagementArgs{\n\t\t\t\tVersion: pulumi.String(\"1.15.1\"),\n\t\t\t\tConfigSync: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncArgs{\n\t\t\t\t\tOci: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncOciArgs{\n\t\t\t\t\t\tSyncRepo: pulumi.String(\"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\"),\n\t\t\t\t\t\tPolicyDir: pulumi.String(\"config-connector\"),\n\t\t\t\t\t\tSyncWaitSecs: pulumi.String(\"20\"),\n\t\t\t\t\t\tSecretType: pulumi.String(\"gcpserviceaccount\"),\n\t\t\t\t\t\tGcpServiceAccountEmail: pulumi.String(\"sa@project-id.iam.gserviceaccount.com\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncOciArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"configmanagement\")\n .location(\"global\")\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .configmanagement(FeatureMembershipConfigmanagementArgs.builder()\n .version(\"1.15.1\")\n .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()\n .oci(FeatureMembershipConfigmanagementConfigSyncOciArgs.builder()\n .syncRepo(\"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\")\n .policyDir(\"config-connector\")\n .syncWaitSecs(\"20\")\n .secretType(\"gcpserviceaccount\")\n .gcpServiceAccountEmail(\"sa@project-id.iam.gserviceaccount.com\")\n .build())\n .build())\n .build())\n .build());\n\n 
}\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: configmanagement\n location: global\n labels:\n foo: bar\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n configmanagement:\n version: 1.15.1\n configSync:\n oci:\n syncRepo: us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\n policyDir: config-connector\n syncWaitSecs: '20'\n secretType: gcpserviceaccount\n gcpServiceAccountEmail: sa@project-id.iam.gserviceaccount.com\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Multi Cluster Service Discovery\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"multiclusterservicediscovery\",\n location: \"global\",\n labels: {\n foo: \"bar\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"multiclusterservicediscovery\",\n location=\"global\",\n labels={\n \"foo\": \"bar\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"multiclusterservicediscovery\",\n Location = \"global\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"multiclusterservicediscovery\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"multiclusterservicediscovery\")\n .location(\"global\")\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: multiclusterservicediscovery\n location: global\n labels:\n foo: bar\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Service Mesh\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: 
\"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"servicemesh\",\n location: \"global\",\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n mesh: {\n management: \"MANAGEMENT_AUTOMATIC\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"servicemesh\",\n location=\"global\")\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n mesh={\n \"management\": \"MANAGEMENT_AUTOMATIC\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"servicemesh\",\n Location = \"global\",\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Mesh = new Gcp.GkeHub.Inputs.FeatureMembershipMeshArgs\n {\n Management = \"MANAGEMENT_AUTOMATIC\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := 
gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"servicemesh\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tMesh: \u0026gkehub.FeatureMembershipMeshArgs{\n\t\t\t\tManagement: pulumi.String(\"MANAGEMENT_AUTOMATIC\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipMeshArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"servicemesh\")\n .location(\"global\")\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .mesh(FeatureMembershipMeshArgs.builder()\n .management(\"MANAGEMENT_AUTOMATIC\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: servicemesh\n location: global\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n mesh:\n management: MANAGEMENT_AUTOMATIC\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Config Management With Regional Membership\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n 
initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n location: \"us-central1\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"configmanagement\",\n location: \"global\",\n labels: {\n foo: \"bar\",\n },\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n membershipLocation: membership.location,\n configmanagement: {\n version: \"1.6.2\",\n configSync: {\n git: {\n syncRepo: \"https://github.com/hashicorp/terraform\",\n },\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n location=\"us-central1\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"configmanagement\",\n location=\"global\",\n labels={\n \"foo\": \"bar\",\n })\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n membership_location=membership.location,\n configmanagement={\n \"version\": \"1.6.2\",\n \"config_sync\": {\n \"git\": {\n \"sync_repo\": \"https://github.com/hashicorp/terraform\",\n },\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Location = \"us-central1\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"configmanagement\",\n Location = \"global\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n MembershipLocation = membership.Location,\n Configmanagement = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementArgs\n {\n Version = \"1.6.2\",\n ConfigSync = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncArgs\n {\n Git = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs\n {\n SyncRepo = \"https://github.com/hashicorp/terraform\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: 
pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"configmanagement\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tMembershipLocation: membership.Location,\n\t\t\tConfigmanagement: \u0026gkehub.FeatureMembershipConfigmanagementArgs{\n\t\t\t\tVersion: pulumi.String(\"1.6.2\"),\n\t\t\t\tConfigSync: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncArgs{\n\t\t\t\t\tGit: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncGitArgs{\n\t\t\t\t\t\tSyncRepo: pulumi.String(\"https://github.com/hashicorp/terraform\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .location(\"us-central1\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var 
feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"configmanagement\")\n .location(\"global\")\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .membershipLocation(membership.location())\n .configmanagement(FeatureMembershipConfigmanagementArgs.builder()\n .version(\"1.6.2\")\n .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()\n .git(FeatureMembershipConfigmanagementConfigSyncGitArgs.builder()\n .syncRepo(\"https://github.com/hashicorp/terraform\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n location: us-central1\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: configmanagement\n location: global\n labels:\n foo: bar\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n membershipLocation: ${membership.location}\n configmanagement:\n version: 1.6.2\n configSync:\n git:\n syncRepo: https://github.com/hashicorp/terraform\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Policy Controller With Minimal Configuration\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"policycontroller\",\n location: \"global\",\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n policycontroller: {\n policyControllerHubConfig: {\n installSpec: \"INSTALL_SPEC_ENABLED\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"policycontroller\",\n location=\"global\")\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n policycontroller={\n \"policy_controller_hub_config\": {\n \"install_spec\": \"INSTALL_SPEC_ENABLED\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = 
\"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"policycontroller\",\n Location = \"global\",\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Policycontroller = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerArgs\n {\n PolicyControllerHubConfig = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs\n {\n InstallSpec = \"INSTALL_SPEC_ENABLED\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"policycontroller\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tPolicycontroller: \u0026gkehub.FeatureMembershipPolicycontrollerArgs{\n\t\t\t\tPolicyControllerHubConfig: \u0026gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs{\n\t\t\t\t\tInstallSpec: pulumi.String(\"INSTALL_SPEC_ENABLED\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport 
com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"policycontroller\")\n .location(\"global\")\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .policycontroller(FeatureMembershipPolicycontrollerArgs.builder()\n .policyControllerHubConfig(FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs.builder()\n .installSpec(\"INSTALL_SPEC_ENABLED\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: policycontroller\n location: global\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n policycontroller:\n policyControllerHubConfig:\n installSpec: INSTALL_SPEC_ENABLED\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Policy Controller With Custom Configurations\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"policycontroller\",\n location: \"global\",\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n policycontroller: {\n policyControllerHubConfig: {\n installSpec: \"INSTALL_SPEC_SUSPENDED\",\n policyContent: {\n templateLibrary: {\n installation: \"NOT_INSTALLED\",\n },\n },\n constraintViolationLimit: 50,\n auditIntervalSeconds: 120,\n referentialRulesEnabled: true,\n logDeniesEnabled: true,\n mutationEnabled: true,\n },\n version: \"1.17.0\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = 
gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"policycontroller\",\n location=\"global\")\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n policycontroller={\n \"policy_controller_hub_config\": {\n \"install_spec\": \"INSTALL_SPEC_SUSPENDED\",\n \"policy_content\": {\n \"template_library\": {\n \"installation\": \"NOT_INSTALLED\",\n },\n },\n \"constraint_violation_limit\": 50,\n \"audit_interval_seconds\": 120,\n \"referential_rules_enabled\": True,\n \"log_denies_enabled\": True,\n \"mutation_enabled\": True,\n },\n \"version\": \"1.17.0\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"policycontroller\",\n Location = \"global\",\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Policycontroller = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerArgs\n {\n PolicyControllerHubConfig = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs\n {\n InstallSpec = \"INSTALL_SPEC_SUSPENDED\",\n PolicyContent = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentArgs\n {\n TemplateLibrary = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs\n {\n Installation = \"NOT_INSTALLED\",\n },\n },\n ConstraintViolationLimit = 50,\n AuditIntervalSeconds = 120,\n ReferentialRulesEnabled = true,\n LogDeniesEnabled = true,\n MutationEnabled = true,\n },\n Version = \"1.17.0\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: 
\u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"policycontroller\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tPolicycontroller: \u0026gkehub.FeatureMembershipPolicycontrollerArgs{\n\t\t\t\tPolicyControllerHubConfig: \u0026gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs{\n\t\t\t\t\tInstallSpec: pulumi.String(\"INSTALL_SPEC_SUSPENDED\"),\n\t\t\t\t\tPolicyContent: \u0026gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentArgs{\n\t\t\t\t\t\tTemplateLibrary: \u0026gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs{\n\t\t\t\t\t\t\tInstallation: pulumi.String(\"NOT_INSTALLED\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tConstraintViolationLimit: pulumi.Int(50),\n\t\t\t\t\tAuditIntervalSeconds: pulumi.Int(120),\n\t\t\t\t\tReferentialRulesEnabled: pulumi.Bool(true),\n\t\t\t\t\tLogDeniesEnabled: pulumi.Bool(true),\n\t\t\t\t\tMutationEnabled: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tVersion: pulumi.String(\"1.17.0\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e 
String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"policycontroller\")\n .location(\"global\")\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .policycontroller(FeatureMembershipPolicycontrollerArgs.builder()\n .policyControllerHubConfig(FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs.builder()\n .installSpec(\"INSTALL_SPEC_SUSPENDED\")\n .policyContent(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentArgs.builder()\n .templateLibrary(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs.builder()\n .installation(\"NOT_INSTALLED\")\n .build())\n .build())\n .constraintViolationLimit(50)\n .auditIntervalSeconds(120)\n .referentialRulesEnabled(true)\n .logDeniesEnabled(true)\n .mutationEnabled(true)\n .build())\n .version(\"1.17.0\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: policycontroller\n location: global\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n policycontroller:\n policyControllerHubConfig:\n installSpec: INSTALL_SPEC_SUSPENDED\n policyContent:\n templateLibrary:\n installation: NOT_INSTALLED\n constraintViolationLimit: 50\n auditIntervalSeconds: 120\n referentialRulesEnabled: true\n logDeniesEnabled: true\n mutationEnabled: true\n version: 1.17.0\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nFeatureMembership can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/features/{{feature}}/membershipId/{{membership}}`\n\n* `{{project}}/{{location}}/{{feature}}/{{membership}}`\n\n* `{{location}}/{{feature}}/{{membership}}`\n\nWhen using the `pulumi import` command, FeatureMembership can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:gkehub/featureMembership:FeatureMembership default projects/{{project}}/locations/{{location}}/features/{{feature}}/membershipId/{{membership}}\n```\n\n```sh\n$ pulumi import gcp:gkehub/featureMembership:FeatureMembership default {{project}}/{{location}}/{{feature}}/{{membership}}\n```\n\n```sh\n$ pulumi import gcp:gkehub/featureMembership:FeatureMembership default {{location}}/{{feature}}/{{membership}}\n```\n\n", + "description": "Contains information about a GKEHub Feature Memberships. Feature Memberships configure GKEHub Features that apply to specific memberships rather than the project as a whole. 
The google_gke_hub is the Fleet API.\n\n## Example Usage\n\n### Config Management\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"configmanagement\",\n location: \"global\",\n labels: {\n foo: \"bar\",\n },\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n configmanagement: {\n version: \"1.19.0\",\n configSync: {\n enabled: true,\n git: {\n syncRepo: \"https://github.com/hashicorp/terraform\",\n },\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"configmanagement\",\n location=\"global\",\n labels={\n \"foo\": \"bar\",\n })\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n configmanagement={\n \"version\": \"1.19.0\",\n \"config_sync\": {\n \"enabled\": True,\n \"git\": {\n \"sync_repo\": \"https://github.com/hashicorp/terraform\",\n },\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"configmanagement\",\n Location = \"global\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Configmanagement = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementArgs\n {\n Version = \"1.19.0\",\n ConfigSync = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncArgs\n {\n Enabled = true,\n Git = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs\n {\n SyncRepo = \"https://github.com/hashicorp/terraform\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc 
main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"configmanagement\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tConfigmanagement: \u0026gkehub.FeatureMembershipConfigmanagementArgs{\n\t\t\t\tVersion: pulumi.String(\"1.19.0\"),\n\t\t\t\tConfigSync: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tGit: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncGitArgs{\n\t\t\t\t\t\tSyncRepo: pulumi.String(\"https://github.com/hashicorp/terraform\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e 
String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"configmanagement\")\n .location(\"global\")\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .configmanagement(FeatureMembershipConfigmanagementArgs.builder()\n .version(\"1.19.0\")\n .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()\n .enabled(true)\n .git(FeatureMembershipConfigmanagementConfigSyncGitArgs.builder()\n .syncRepo(\"https://github.com/hashicorp/terraform\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: configmanagement\n location: global\n labels:\n foo: bar\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n configmanagement:\n version: 1.19.0\n configSync:\n enabled: true\n git:\n syncRepo: https://github.com/hashicorp/terraform\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Config Management With OCI\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"configmanagement\",\n location: \"global\",\n labels: {\n foo: \"bar\",\n },\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n configmanagement: {\n version: \"1.19.0\",\n configSync: {\n enabled: true,\n oci: {\n syncRepo: \"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\",\n policyDir: \"config-connector\",\n syncWaitSecs: \"20\",\n secretType: \"gcpserviceaccount\",\n gcpServiceAccountEmail: \"sa@project-id.iam.gserviceaccount.com\",\n },\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"configmanagement\",\n location=\"global\",\n labels={\n \"foo\": \"bar\",\n })\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n configmanagement={\n \"version\": \"1.19.0\",\n 
\"config_sync\": {\n \"enabled\": True,\n \"oci\": {\n \"sync_repo\": \"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\",\n \"policy_dir\": \"config-connector\",\n \"sync_wait_secs\": \"20\",\n \"secret_type\": \"gcpserviceaccount\",\n \"gcp_service_account_email\": \"sa@project-id.iam.gserviceaccount.com\",\n },\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"configmanagement\",\n Location = \"global\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Configmanagement = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementArgs\n {\n Version = \"1.19.0\",\n ConfigSync = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncArgs\n {\n Enabled = true,\n Oci = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncOciArgs\n {\n SyncRepo = \"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\",\n PolicyDir = \"config-connector\",\n SyncWaitSecs = \"20\",\n SecretType = \"gcpserviceaccount\",\n GcpServiceAccountEmail = \"sa@project-id.iam.gserviceaccount.com\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"configmanagement\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: 
feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tConfigmanagement: \u0026gkehub.FeatureMembershipConfigmanagementArgs{\n\t\t\t\tVersion: pulumi.String(\"1.19.0\"),\n\t\t\t\tConfigSync: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tOci: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncOciArgs{\n\t\t\t\t\t\tSyncRepo: pulumi.String(\"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\"),\n\t\t\t\t\t\tPolicyDir: pulumi.String(\"config-connector\"),\n\t\t\t\t\t\tSyncWaitSecs: pulumi.String(\"20\"),\n\t\t\t\t\t\tSecretType: pulumi.String(\"gcpserviceaccount\"),\n\t\t\t\t\t\tGcpServiceAccountEmail: pulumi.String(\"sa@project-id.iam.gserviceaccount.com\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncOciArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"configmanagement\")\n .location(\"global\")\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .configmanagement(FeatureMembershipConfigmanagementArgs.builder()\n .version(\"1.19.0\")\n .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()\n .enabled(true)\n .oci(FeatureMembershipConfigmanagementConfigSyncOciArgs.builder()\n .syncRepo(\"us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\")\n .policyDir(\"config-connector\")\n .syncWaitSecs(\"20\")\n .secretType(\"gcpserviceaccount\")\n .gcpServiceAccountEmail(\"sa@project-id.iam.gserviceaccount.com\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: 
gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: configmanagement\n location: global\n labels:\n foo: bar\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n configmanagement:\n version: 1.19.0\n configSync:\n enabled: true\n oci:\n syncRepo: us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest\n policyDir: config-connector\n syncWaitSecs: '20'\n secretType: gcpserviceaccount\n gcpServiceAccountEmail: sa@project-id.iam.gserviceaccount.com\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Multi Cluster Service Discovery\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"multiclusterservicediscovery\",\n location: \"global\",\n labels: {\n foo: \"bar\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"multiclusterservicediscovery\",\n location=\"global\",\n labels={\n \"foo\": \"bar\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"multiclusterservicediscovery\",\n Location = \"global\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"multiclusterservicediscovery\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"multiclusterservicediscovery\")\n .location(\"global\")\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: multiclusterservicediscovery\n location: global\n labels:\n foo: bar\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Service Mesh\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: 
\"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"servicemesh\",\n location: \"global\",\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n mesh: {\n management: \"MANAGEMENT_AUTOMATIC\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"servicemesh\",\n location=\"global\")\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n mesh={\n \"management\": \"MANAGEMENT_AUTOMATIC\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"servicemesh\",\n Location = \"global\",\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Mesh = new Gcp.GkeHub.Inputs.FeatureMembershipMeshArgs\n {\n Management = \"MANAGEMENT_AUTOMATIC\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", 
\u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"servicemesh\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tMesh: \u0026gkehub.FeatureMembershipMeshArgs{\n\t\t\t\tManagement: pulumi.String(\"MANAGEMENT_AUTOMATIC\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipMeshArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"servicemesh\")\n .location(\"global\")\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .mesh(FeatureMembershipMeshArgs.builder()\n .management(\"MANAGEMENT_AUTOMATIC\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: servicemesh\n location: global\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n mesh:\n management: MANAGEMENT_AUTOMATIC\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Config Management With Regional Membership\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = 
new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n location: \"us-central1\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"configmanagement\",\n location: \"global\",\n labels: {\n foo: \"bar\",\n },\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n membershipLocation: membership.location,\n configmanagement: {\n version: \"1.19.0\",\n configSync: {\n enabled: true,\n git: {\n syncRepo: \"https://github.com/hashicorp/terraform\",\n },\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n location=\"us-central1\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"configmanagement\",\n location=\"global\",\n labels={\n \"foo\": \"bar\",\n })\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n membership_location=membership.location,\n configmanagement={\n \"version\": \"1.19.0\",\n \"config_sync\": {\n \"enabled\": True,\n \"git\": {\n \"sync_repo\": \"https://github.com/hashicorp/terraform\",\n },\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Location = \"us-central1\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"configmanagement\",\n Location = \"global\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n MembershipLocation = membership.Location,\n Configmanagement = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementArgs\n {\n Version = \"1.19.0\",\n ConfigSync = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncArgs\n {\n Enabled = true,\n Git = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs\n {\n SyncRepo = \"https://github.com/hashicorp/terraform\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: 
pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"configmanagement\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tMembershipLocation: membership.Location,\n\t\t\tConfigmanagement: \u0026gkehub.FeatureMembershipConfigmanagementArgs{\n\t\t\t\tVersion: pulumi.String(\"1.19.0\"),\n\t\t\t\tConfigSync: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncArgs{\n\t\t\t\t\tEnabled: pulumi.Bool(true),\n\t\t\t\t\tGit: \u0026gkehub.FeatureMembershipConfigmanagementConfigSyncGitArgs{\n\t\t\t\t\t\tSyncRepo: pulumi.String(\"https://github.com/hashicorp/terraform\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .location(\"us-central1\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n 
.build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"configmanagement\")\n .location(\"global\")\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .membershipLocation(membership.location())\n .configmanagement(FeatureMembershipConfigmanagementArgs.builder()\n .version(\"1.19.0\")\n .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()\n .enabled(true)\n .git(FeatureMembershipConfigmanagementConfigSyncGitArgs.builder()\n .syncRepo(\"https://github.com/hashicorp/terraform\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n location: us-central1\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: configmanagement\n location: global\n labels:\n foo: bar\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n membershipLocation: ${membership.location}\n configmanagement:\n version: 1.19.0\n configSync:\n enabled: true\n git:\n syncRepo: https://github.com/hashicorp/terraform\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Policy Controller With Minimal Configuration\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"policycontroller\",\n location: \"global\",\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n policycontroller: {\n policyControllerHubConfig: {\n installSpec: \"INSTALL_SPEC_ENABLED\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"policycontroller\",\n location=\"global\")\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n policycontroller={\n \"policy_controller_hub_config\": {\n \"install_spec\": \"INSTALL_SPEC_ENABLED\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new 
Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"policycontroller\",\n Location = \"global\",\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Policycontroller = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerArgs\n {\n PolicyControllerHubConfig = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs\n {\n InstallSpec = \"INSTALL_SPEC_ENABLED\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: \u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"policycontroller\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tPolicycontroller: \u0026gkehub.FeatureMembershipPolicycontrollerArgs{\n\t\t\t\tPolicyControllerHubConfig: \u0026gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs{\n\t\t\t\t\tInstallSpec: pulumi.String(\"INSTALL_SPEC_ENABLED\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport 
com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n .resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"policycontroller\")\n .location(\"global\")\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .policycontroller(FeatureMembershipPolicycontrollerArgs.builder()\n .policyControllerHubConfig(FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs.builder()\n .installSpec(\"INSTALL_SPEC_ENABLED\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: policycontroller\n location: global\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n policycontroller:\n policyControllerHubConfig:\n installSpec: INSTALL_SPEC_ENABLED\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Policy Controller With Custom Configurations\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cluster = new gcp.container.Cluster(\"cluster\", {\n name: \"my-cluster\",\n location: \"us-central1-a\",\n initialNodeCount: 1,\n});\nconst membership = new gcp.gkehub.Membership(\"membership\", {\n membershipId: \"my-membership\",\n endpoint: {\n gkeCluster: {\n resourceLink: pulumi.interpolate`//container.googleapis.com/${cluster.id}`,\n },\n },\n});\nconst feature = new gcp.gkehub.Feature(\"feature\", {\n name: \"policycontroller\",\n location: \"global\",\n});\nconst featureMember = new gcp.gkehub.FeatureMembership(\"feature_member\", {\n location: \"global\",\n feature: feature.name,\n membership: membership.membershipId,\n policycontroller: {\n policyControllerHubConfig: {\n installSpec: \"INSTALL_SPEC_SUSPENDED\",\n policyContent: {\n templateLibrary: {\n installation: \"NOT_INSTALLED\",\n },\n },\n constraintViolationLimit: 50,\n auditIntervalSeconds: 120,\n referentialRulesEnabled: true,\n logDeniesEnabled: true,\n mutationEnabled: true,\n },\n version: \"1.17.0\",\n 
},\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncluster = gcp.container.Cluster(\"cluster\",\n name=\"my-cluster\",\n location=\"us-central1-a\",\n initial_node_count=1)\nmembership = gcp.gkehub.Membership(\"membership\",\n membership_id=\"my-membership\",\n endpoint={\n \"gke_cluster\": {\n \"resource_link\": cluster.id.apply(lambda id: f\"//container.googleapis.com/{id}\"),\n },\n })\nfeature = gcp.gkehub.Feature(\"feature\",\n name=\"policycontroller\",\n location=\"global\")\nfeature_member = gcp.gkehub.FeatureMembership(\"feature_member\",\n location=\"global\",\n feature=feature.name,\n membership=membership.membership_id,\n policycontroller={\n \"policy_controller_hub_config\": {\n \"install_spec\": \"INSTALL_SPEC_SUSPENDED\",\n \"policy_content\": {\n \"template_library\": {\n \"installation\": \"NOT_INSTALLED\",\n },\n },\n \"constraint_violation_limit\": 50,\n \"audit_interval_seconds\": 120,\n \"referential_rules_enabled\": True,\n \"log_denies_enabled\": True,\n \"mutation_enabled\": True,\n },\n \"version\": \"1.17.0\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cluster = new Gcp.Container.Cluster(\"cluster\", new()\n {\n Name = \"my-cluster\",\n Location = \"us-central1-a\",\n InitialNodeCount = 1,\n });\n\n var membership = new Gcp.GkeHub.Membership(\"membership\", new()\n {\n MembershipId = \"my-membership\",\n Endpoint = new Gcp.GkeHub.Inputs.MembershipEndpointArgs\n {\n GkeCluster = new Gcp.GkeHub.Inputs.MembershipEndpointGkeClusterArgs\n {\n ResourceLink = cluster.Id.Apply(id =\u003e $\"//container.googleapis.com/{id}\"),\n },\n },\n });\n\n var feature = new Gcp.GkeHub.Feature(\"feature\", new()\n {\n Name = \"policycontroller\",\n Location = \"global\",\n });\n\n var featureMember = new Gcp.GkeHub.FeatureMembership(\"feature_member\", new()\n {\n Location = \"global\",\n Feature = feature.Name,\n Membership = membership.MembershipId,\n Policycontroller = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerArgs\n {\n PolicyControllerHubConfig = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs\n {\n InstallSpec = \"INSTALL_SPEC_SUSPENDED\",\n PolicyContent = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentArgs\n {\n TemplateLibrary = new Gcp.GkeHub.Inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs\n {\n Installation = \"NOT_INSTALLED\",\n },\n },\n ConstraintViolationLimit = 50,\n AuditIntervalSeconds = 120,\n ReferentialRulesEnabled = true,\n LogDeniesEnabled = true,\n MutationEnabled = true,\n },\n Version = \"1.17.0\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/gkehub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcluster, err := container.NewCluster(ctx, \"cluster\", \u0026container.ClusterArgs{\n\t\t\tName: pulumi.String(\"my-cluster\"),\n\t\t\tLocation: pulumi.String(\"us-central1-a\"),\n\t\t\tInitialNodeCount: pulumi.Int(1),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmembership, err := gkehub.NewMembership(ctx, \"membership\", \u0026gkehub.MembershipArgs{\n\t\t\tMembershipId: pulumi.String(\"my-membership\"),\n\t\t\tEndpoint: 
\u0026gkehub.MembershipEndpointArgs{\n\t\t\t\tGkeCluster: \u0026gkehub.MembershipEndpointGkeClusterArgs{\n\t\t\t\t\tResourceLink: cluster.ID().ApplyT(func(id string) (string, error) {\n\t\t\t\t\t\treturn fmt.Sprintf(\"//container.googleapis.com/%v\", id), nil\n\t\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfeature, err := gkehub.NewFeature(ctx, \"feature\", \u0026gkehub.FeatureArgs{\n\t\t\tName: pulumi.String(\"policycontroller\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = gkehub.NewFeatureMembership(ctx, \"feature_member\", \u0026gkehub.FeatureMembershipArgs{\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tFeature: feature.Name,\n\t\t\tMembership: membership.MembershipId,\n\t\t\tPolicycontroller: \u0026gkehub.FeatureMembershipPolicycontrollerArgs{\n\t\t\t\tPolicyControllerHubConfig: \u0026gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs{\n\t\t\t\t\tInstallSpec: pulumi.String(\"INSTALL_SPEC_SUSPENDED\"),\n\t\t\t\t\tPolicyContent: \u0026gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentArgs{\n\t\t\t\t\t\tTemplateLibrary: \u0026gkehub.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs{\n\t\t\t\t\t\t\tInstallation: pulumi.String(\"NOT_INSTALLED\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tConstraintViolationLimit: pulumi.Int(50),\n\t\t\t\t\tAuditIntervalSeconds: pulumi.Int(120),\n\t\t\t\t\tReferentialRulesEnabled: pulumi.Bool(true),\n\t\t\t\t\tLogDeniesEnabled: pulumi.Bool(true),\n\t\t\t\t\tMutationEnabled: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t\tVersion: pulumi.String(\"1.17.0\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.container.Cluster;\nimport com.pulumi.gcp.container.ClusterArgs;\nimport com.pulumi.gcp.gkehub.Membership;\nimport com.pulumi.gcp.gkehub.MembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointArgs;\nimport com.pulumi.gcp.gkehub.inputs.MembershipEndpointGkeClusterArgs;\nimport com.pulumi.gcp.gkehub.Feature;\nimport com.pulumi.gcp.gkehub.FeatureArgs;\nimport com.pulumi.gcp.gkehub.FeatureMembership;\nimport com.pulumi.gcp.gkehub.FeatureMembershipArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentArgs;\nimport com.pulumi.gcp.gkehub.inputs.FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var cluster = new Cluster(\"cluster\", ClusterArgs.builder()\n .name(\"my-cluster\")\n .location(\"us-central1-a\")\n .initialNodeCount(1)\n .build());\n\n var membership = new Membership(\"membership\", MembershipArgs.builder()\n .membershipId(\"my-membership\")\n .endpoint(MembershipEndpointArgs.builder()\n .gkeCluster(MembershipEndpointGkeClusterArgs.builder()\n 
.resourceLink(cluster.id().applyValue(id -\u003e String.format(\"//container.googleapis.com/%s\", id)))\n .build())\n .build())\n .build());\n\n var feature = new Feature(\"feature\", FeatureArgs.builder()\n .name(\"policycontroller\")\n .location(\"global\")\n .build());\n\n var featureMember = new FeatureMembership(\"featureMember\", FeatureMembershipArgs.builder()\n .location(\"global\")\n .feature(feature.name())\n .membership(membership.membershipId())\n .policycontroller(FeatureMembershipPolicycontrollerArgs.builder()\n .policyControllerHubConfig(FeatureMembershipPolicycontrollerPolicyControllerHubConfigArgs.builder()\n .installSpec(\"INSTALL_SPEC_SUSPENDED\")\n .policyContent(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentArgs.builder()\n .templateLibrary(FeatureMembershipPolicycontrollerPolicyControllerHubConfigPolicyContentTemplateLibraryArgs.builder()\n .installation(\"NOT_INSTALLED\")\n .build())\n .build())\n .constraintViolationLimit(50)\n .auditIntervalSeconds(120)\n .referentialRulesEnabled(true)\n .logDeniesEnabled(true)\n .mutationEnabled(true)\n .build())\n .version(\"1.17.0\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster:\n type: gcp:container:Cluster\n properties:\n name: my-cluster\n location: us-central1-a\n initialNodeCount: 1\n membership:\n type: gcp:gkehub:Membership\n properties:\n membershipId: my-membership\n endpoint:\n gkeCluster:\n resourceLink: //container.googleapis.com/${cluster.id}\n feature:\n type: gcp:gkehub:Feature\n properties:\n name: policycontroller\n location: global\n featureMember:\n type: gcp:gkehub:FeatureMembership\n name: feature_member\n properties:\n location: global\n feature: ${feature.name}\n membership: ${membership.membershipId}\n policycontroller:\n policyControllerHubConfig:\n installSpec: INSTALL_SPEC_SUSPENDED\n policyContent:\n templateLibrary:\n installation: NOT_INSTALLED\n constraintViolationLimit: 50\n auditIntervalSeconds: 120\n referentialRulesEnabled: true\n logDeniesEnabled: true\n mutationEnabled: true\n version: 1.17.0\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nFeatureMembership can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/features/{{feature}}/membershipId/{{membership}}`\n\n* `{{project}}/{{location}}/{{feature}}/{{membership}}`\n\n* `{{location}}/{{feature}}/{{membership}}`\n\nWhen using the `pulumi import` command, FeatureMembership can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:gkehub/featureMembership:FeatureMembership default projects/{{project}}/locations/{{location}}/features/{{feature}}/membershipId/{{membership}}\n```\n\n```sh\n$ pulumi import gcp:gkehub/featureMembership:FeatureMembership default {{project}}/{{location}}/{{feature}}/{{membership}}\n```\n\n```sh\n$ pulumi import gcp:gkehub/featureMembership:FeatureMembership default {{location}}/{{feature}}/{{membership}}\n```\n\n", "properties": { "configmanagement": { "$ref": "#/types/gcp:gkehub/FeatureMembershipConfigmanagement:FeatureMembershipConfigmanagement", @@ -204360,7 +205928,7 @@ } }, "gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider": { - "description": "A configuration for an external identity provider.\n\n\nTo get more information about WorkloadIdentityPoolProvider, see:\n\n* [API documentation](https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers)\n* How-to Guides\n * [Managing workload identity providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers#managing_workload_identity_providers)\n\n## Example Usage\n\n### Iam Workload Identity Pool Provider Aws Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n aws: {\n accountId: \"999999999999\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n aws={\n \"account_id\": \"999999999999\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n Aws = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderAwsArgs\n {\n AccountId = \"999999999999\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tAws: \u0026iam.WorkloadIdentityPoolProviderAwsArgs{\n\t\t\t\tAccountId: pulumi.String(\"999999999999\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage 
generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderAwsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .aws(WorkloadIdentityPoolProviderAwsArgs.builder()\n .accountId(\"999999999999\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n aws:\n accountId: '999999999999'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Aws Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n displayName: \"Name of provider\",\n description: \"AWS identity pool provider for automated test\",\n disabled: true,\n attributeCondition: \"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\",\n attributeMapping: {\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\",\n },\n aws: {\n accountId: \"999999999999\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n display_name=\"Name of provider\",\n description=\"AWS identity pool provider for automated test\",\n disabled=True,\n attribute_condition=\"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\",\n attribute_mapping={\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\",\n },\n aws={\n \"account_id\": \"999999999999\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n DisplayName = \"Name of provider\",\n Description = \"AWS identity pool provider for automated test\",\n Disabled = true,\n AttributeCondition = \"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\",\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.arn\" },\n { \"attribute.aws_account\", \"assertion.account\" },\n { \"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\" },\n },\n Aws = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderAwsArgs\n {\n AccountId = \"999999999999\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tDisplayName: pulumi.String(\"Name of provider\"),\n\t\t\tDescription: pulumi.String(\"AWS identity pool provider for automated test\"),\n\t\t\tDisabled: pulumi.Bool(true),\n\t\t\tAttributeCondition: pulumi.String(\"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.arn\"),\n\t\t\t\t\"attribute.aws_account\": pulumi.String(\"assertion.account\"),\n\t\t\t\t\"attribute.environment\": pulumi.String(\"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\"),\n\t\t\t},\n\t\t\tAws: \u0026iam.WorkloadIdentityPoolProviderAwsArgs{\n\t\t\t\tAccountId: pulumi.String(\"999999999999\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderAwsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .displayName(\"Name of provider\")\n .description(\"AWS identity pool provider for automated test\")\n .disabled(true)\n .attributeCondition(\"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\")\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"assertion.arn\"),\n Map.entry(\"attribute.aws_account\", \"assertion.account\"),\n Map.entry(\"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\")\n ))\n .aws(WorkloadIdentityPoolProviderAwsArgs.builder()\n .accountId(\"999999999999\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n displayName: Name of provider\n description: AWS identity pool provider for automated test\n disabled: true\n attributeCondition: attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"\n attributeMapping:\n google.subject: assertion.arn\n attribute.aws_account: assertion.account\n attribute.environment: 'assertion.arn.contains(\":instance-profile/Production\") ? 
\"prod\" : \"test\"'\n aws:\n accountId: '999999999999'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Oidc Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n attributeMapping: {\n \"google.subject\": \"assertion.sub\",\n },\n oidc: {\n issuerUri: \"https://sts.windows.net/azure-tenant-id\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n attribute_mapping={\n \"google.subject\": \"assertion.sub\",\n },\n oidc={\n \"issuer_uri\": \"https://sts.windows.net/azure-tenant-id\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.sub\" },\n },\n Oidc = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderOidcArgs\n {\n IssuerUri = \"https://sts.windows.net/azure-tenant-id\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.sub\"),\n\t\t\t},\n\t\t\tOidc: \u0026iam.WorkloadIdentityPoolProviderOidcArgs{\n\t\t\t\tIssuerUri: pulumi.String(\"https://sts.windows.net/azure-tenant-id\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderOidcArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) 
{\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .attributeMapping(Map.of(\"google.subject\", \"assertion.sub\"))\n .oidc(WorkloadIdentityPoolProviderOidcArgs.builder()\n .issuerUri(\"https://sts.windows.net/azure-tenant-id\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n attributeMapping:\n google.subject: assertion.sub\n oidc:\n issuerUri: https://sts.windows.net/azure-tenant-id\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Oidc Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n displayName: \"Name of provider\",\n description: \"OIDC identity pool provider for automated test\",\n disabled: true,\n attributeCondition: \"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n attributeMapping: {\n \"google.subject\": \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\",\n \"attribute.tid\": \"assertion.tid\",\n \"attribute.managed_identity_name\": ` {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n`,\n },\n oidc: {\n allowedAudiences: [\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n ],\n issuerUri: \"https://sts.windows.net/azure-tenant-id\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n display_name=\"Name of provider\",\n description=\"OIDC identity pool provider for automated test\",\n disabled=True,\n attribute_condition=\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n attribute_mapping={\n \"google.subject\": \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\",\n \"attribute.tid\": \"assertion.tid\",\n \"attribute.managed_identity_name\": \"\"\" {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n\"\"\",\n },\n oidc={\n \"allowed_audiences\": [\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n ],\n \"issuer_uri\": \"https://sts.windows.net/azure-tenant-id\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await 
Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n DisplayName = \"Name of provider\",\n Description = \"OIDC identity pool provider for automated test\",\n Disabled = true,\n AttributeCondition = \"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n AttributeMapping = \n {\n { \"google.subject\", \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\" },\n { \"attribute.tid\", \"assertion.tid\" },\n { \"attribute.managed_identity_name\", @\" {\n \"\"8bb39bdb-1cc5-4447-b7db-a19e920eb111\"\":\"\"workload1\"\",\n \"\"55d36609-9bcf-48e0-a366-a3cf19027d2a\"\":\"\"workload2\"\"\n }[assertion.oid]\n\" },\n },\n Oidc = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderOidcArgs\n {\n AllowedAudiences = new[]\n {\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n },\n IssuerUri = \"https://sts.windows.net/azure-tenant-id\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tDisplayName: pulumi.String(\"Name of provider\"),\n\t\t\tDescription: pulumi.String(\"OIDC identity pool provider for automated test\"),\n\t\t\tDisabled: pulumi.Bool(true),\n\t\t\tAttributeCondition: pulumi.String(\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\"),\n\t\t\t\t\"attribute.tid\": pulumi.String(\"assertion.tid\"),\n\t\t\t\t\"attribute.managed_identity_name\": pulumi.String(\" {\\n \\\"8bb39bdb-1cc5-4447-b7db-a19e920eb111\\\":\\\"workload1\\\",\\n \\\"55d36609-9bcf-48e0-a366-a3cf19027d2a\\\":\\\"workload2\\\"\\n }[assertion.oid]\\n\"),\n\t\t\t},\n\t\t\tOidc: \u0026iam.WorkloadIdentityPoolProviderOidcArgs{\n\t\t\t\tAllowedAudiences: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"https://example.com/gcp-oidc-federation\"),\n\t\t\t\t\tpulumi.String(\"example.com/gcp-oidc-federation\"),\n\t\t\t\t},\n\t\t\t\tIssuerUri: pulumi.String(\"https://sts.windows.net/azure-tenant-id\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderOidcArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport 
java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .displayName(\"Name of provider\")\n .description(\"OIDC identity pool provider for automated test\")\n .disabled(true)\n .attributeCondition(\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\")\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\"),\n Map.entry(\"attribute.tid\", \"assertion.tid\"),\n Map.entry(\"attribute.managed_identity_name\", \"\"\"\n {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n \"\"\")\n ))\n .oidc(WorkloadIdentityPoolProviderOidcArgs.builder()\n .allowedAudiences( \n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\")\n .issuerUri(\"https://sts.windows.net/azure-tenant-id\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n displayName: Name of provider\n description: OIDC identity pool provider for automated test\n disabled: true\n attributeCondition: '\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups'\n attributeMapping:\n google.subject: '\"azure::\" + assertion.tid + \"::\" + assertion.sub'\n attribute.tid: assertion.tid\n attribute.managed_identity_name: |2\n {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n oidc:\n allowedAudiences:\n - https://example.com/gcp-oidc-federation\n - example.com/gcp-oidc-federation\n issuerUri: https://sts.windows.net/azure-tenant-id\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Saml Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as std from \"@pulumi/std\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n attributeMapping: {\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\",\n },\n saml: {\n idpMetadataXml: std.file({\n input: \"test-fixtures/metadata.xml\",\n }).then(invoke =\u003e invoke.result),\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_std as std\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n attribute_mapping={\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\",\n },\n saml={\n \"idp_metadata_xml\": std.file(input=\"test-fixtures/metadata.xml\").result,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Std = Pulumi.Std;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.arn\" },\n { \"attribute.aws_account\", \"assertion.account\" },\n { \"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\" },\n },\n Saml = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderSamlArgs\n {\n IdpMetadataXml = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/metadata.xml\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi-std/sdk/go/std\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/metadata.xml\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.arn\"),\n\t\t\t\t\"attribute.aws_account\": pulumi.String(\"assertion.account\"),\n\t\t\t\t\"attribute.environment\": pulumi.String(\"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\"),\n\t\t\t},\n\t\t\tSaml: \u0026iam.WorkloadIdentityPoolProviderSamlArgs{\n\t\t\t\tIdpMetadataXml: pulumi.String(invokeFile.Result),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderSamlArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"assertion.arn\"),\n Map.entry(\"attribute.aws_account\", \"assertion.account\"),\n Map.entry(\"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\")\n ))\n .saml(WorkloadIdentityPoolProviderSamlArgs.builder()\n .idpMetadataXml(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/metadata.xml\")\n .build()).result())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n attributeMapping:\n google.subject: assertion.arn\n attribute.aws_account: assertion.account\n attribute.environment: 'assertion.arn.contains(\":instance-profile/Production\") ? \"prod\" : \"test\"'\n saml:\n idpMetadataXml:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/metadata.xml\n Return: result\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Saml Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as std from \"@pulumi/std\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n displayName: \"Name of provider\",\n description: \"SAML 2.0 identity pool provider for automated test\",\n disabled: true,\n attributeMapping: {\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\",\n },\n saml: {\n idpMetadataXml: std.file({\n input: \"test-fixtures/metadata.xml\",\n }).then(invoke =\u003e invoke.result),\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_std as std\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n display_name=\"Name of provider\",\n description=\"SAML 2.0 identity pool provider for automated test\",\n disabled=True,\n attribute_mapping={\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\",\n },\n saml={\n \"idp_metadata_xml\": std.file(input=\"test-fixtures/metadata.xml\").result,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Std = Pulumi.Std;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n DisplayName = \"Name of provider\",\n Description = \"SAML 2.0 identity pool provider for automated test\",\n Disabled = true,\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.arn\" },\n { \"attribute.aws_account\", \"assertion.account\" },\n { \"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\" },\n },\n Saml = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderSamlArgs\n {\n IdpMetadataXml = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/metadata.xml\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi-std/sdk/go/std\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/metadata.xml\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tDisplayName: pulumi.String(\"Name of provider\"),\n\t\t\tDescription: pulumi.String(\"SAML 2.0 identity pool provider for automated test\"),\n\t\t\tDisabled: pulumi.Bool(true),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.arn\"),\n\t\t\t\t\"attribute.aws_account\": pulumi.String(\"assertion.account\"),\n\t\t\t\t\"attribute.environment\": pulumi.String(\"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\"),\n\t\t\t},\n\t\t\tSaml: \u0026iam.WorkloadIdentityPoolProviderSamlArgs{\n\t\t\t\tIdpMetadataXml: pulumi.String(invokeFile.Result),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderSamlArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .displayName(\"Name of provider\")\n .description(\"SAML 2.0 identity pool provider for automated test\")\n .disabled(true)\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"assertion.arn\"),\n Map.entry(\"attribute.aws_account\", \"assertion.account\"),\n Map.entry(\"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\")\n ))\n .saml(WorkloadIdentityPoolProviderSamlArgs.builder()\n .idpMetadataXml(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/metadata.xml\")\n .build()).result())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n displayName: Name of provider\n description: SAML 2.0 identity pool provider for automated test\n disabled: true\n attributeMapping:\n google.subject: assertion.arn\n attribute.aws_account: assertion.account\n attribute.environment: 'assertion.arn.contains(\":instance-profile/Production\") ? 
\"prod\" : \"test\"'\n saml:\n idpMetadataXml:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/metadata.xml\n Return: result\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Oidc Upload Key\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n displayName: \"Name of provider\",\n description: \"OIDC identity pool provider for automated test\",\n disabled: true,\n attributeCondition: \"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n attributeMapping: {\n \"google.subject\": \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\",\n \"attribute.tid\": \"assertion.tid\",\n \"attribute.managed_identity_name\": ` {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n`,\n },\n oidc: {\n allowedAudiences: [\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n ],\n issuerUri: \"https://sts.windows.net/azure-tenant-id\",\n jwksJson: \"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n display_name=\"Name of provider\",\n description=\"OIDC identity pool provider for automated test\",\n disabled=True,\n attribute_condition=\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n attribute_mapping={\n \"google.subject\": \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\",\n \"attribute.tid\": \"assertion.tid\",\n \"attribute.managed_identity_name\": \"\"\" {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n\"\"\",\n },\n oidc={\n \"allowed_audiences\": [\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n ],\n \"issuer_uri\": \"https://sts.windows.net/azure-tenant-id\",\n \"jwks_json\": 
\"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n DisplayName = \"Name of provider\",\n Description = \"OIDC identity pool provider for automated test\",\n Disabled = true,\n AttributeCondition = \"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n AttributeMapping = \n {\n { \"google.subject\", \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\" },\n { \"attribute.tid\", \"assertion.tid\" },\n { \"attribute.managed_identity_name\", @\" {\n \"\"8bb39bdb-1cc5-4447-b7db-a19e920eb111\"\":\"\"workload1\"\",\n \"\"55d36609-9bcf-48e0-a366-a3cf19027d2a\"\":\"\"workload2\"\"\n }[assertion.oid]\n\" },\n },\n Oidc = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderOidcArgs\n {\n AllowedAudiences = new[]\n {\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n },\n IssuerUri = \"https://sts.windows.net/azure-tenant-id\",\n JwksJson = \"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tDisplayName: pulumi.String(\"Name of provider\"),\n\t\t\tDescription: pulumi.String(\"OIDC identity pool provider for automated test\"),\n\t\t\tDisabled: pulumi.Bool(true),\n\t\t\tAttributeCondition: pulumi.String(\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + 
assertion.sub\"),\n\t\t\t\t\"attribute.tid\": pulumi.String(\"assertion.tid\"),\n\t\t\t\t\"attribute.managed_identity_name\": pulumi.String(\" {\\n \\\"8bb39bdb-1cc5-4447-b7db-a19e920eb111\\\":\\\"workload1\\\",\\n \\\"55d36609-9bcf-48e0-a366-a3cf19027d2a\\\":\\\"workload2\\\"\\n }[assertion.oid]\\n\"),\n\t\t\t},\n\t\t\tOidc: \u0026iam.WorkloadIdentityPoolProviderOidcArgs{\n\t\t\t\tAllowedAudiences: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"https://example.com/gcp-oidc-federation\"),\n\t\t\t\t\tpulumi.String(\"example.com/gcp-oidc-federation\"),\n\t\t\t\t},\n\t\t\t\tIssuerUri: pulumi.String(\"https://sts.windows.net/azure-tenant-id\"),\n\t\t\t\tJwksJson: pulumi.String(\"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderOidcArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .displayName(\"Name of provider\")\n .description(\"OIDC identity pool provider for automated test\")\n .disabled(true)\n .attributeCondition(\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\")\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\"),\n Map.entry(\"attribute.tid\", \"assertion.tid\"),\n Map.entry(\"attribute.managed_identity_name\", \"\"\"\n {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n \"\"\")\n ))\n .oidc(WorkloadIdentityPoolProviderOidcArgs.builder()\n .allowedAudiences( \n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\")\n .issuerUri(\"https://sts.windows.net/azure-tenant-id\")\n 
.jwksJson(\"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n displayName: Name of provider\n description: OIDC identity pool provider for automated test\n disabled: true\n attributeCondition: '\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups'\n attributeMapping:\n google.subject: '\"azure::\" + assertion.tid + \"::\" + assertion.sub'\n attribute.tid: assertion.tid\n attribute.managed_identity_name: |2\n {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n oidc:\n allowedAudiences:\n - https://example.com/gcp-oidc-federation\n - example.com/gcp-oidc-federation\n issuerUri: https://sts.windows.net/azure-tenant-id\n jwksJson: '{\"keys\":[{\"kty\":\"RSA\",\"alg\":\"RS256\",\"kid\":\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\",\"use\":\"sig\",\"e\":\"AQAB\",\"n\":\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\"}]}'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nWorkloadIdentityPoolProvider can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}`\n\n* `{{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}`\n\n* `{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}`\n\nWhen using the `pulumi import` command, WorkloadIdentityPoolProvider can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}\n```\n\n```sh\n$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}\n```\n\n```sh\n$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}\n```\n\n", + "description": "A configuration for an external identity provider.\n\n\nTo get more information about WorkloadIdentityPoolProvider, see:\n\n* [API documentation](https://cloud.google.com/iam/docs/reference/rest/v1/projects.locations.workloadIdentityPools.providers)\n* How-to Guides\n * [Managing workload identity providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers#managing_workload_identity_providers)\n\n## Example Usage\n\n### Iam Workload Identity Pool Provider Aws Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n aws: {\n accountId: \"999999999999\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n aws={\n \"account_id\": \"999999999999\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n Aws = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderAwsArgs\n {\n AccountId = \"999999999999\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tAws: \u0026iam.WorkloadIdentityPoolProviderAwsArgs{\n\t\t\t\tAccountId: pulumi.String(\"999999999999\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport 
com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderAwsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .aws(WorkloadIdentityPoolProviderAwsArgs.builder()\n .accountId(\"999999999999\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n aws:\n accountId: '999999999999'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Aws Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n displayName: \"Name of provider\",\n description: \"AWS identity pool provider for automated test\",\n disabled: true,\n attributeCondition: \"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\",\n attributeMapping: {\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\",\n },\n aws: {\n accountId: \"999999999999\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n display_name=\"Name of provider\",\n description=\"AWS identity pool provider for automated test\",\n disabled=True,\n attribute_condition=\"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\",\n attribute_mapping={\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\",\n },\n aws={\n \"account_id\": \"999999999999\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n DisplayName = \"Name of provider\",\n Description = \"AWS identity pool provider for automated test\",\n Disabled = true,\n AttributeCondition = \"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\",\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.arn\" },\n { \"attribute.aws_account\", \"assertion.account\" },\n { \"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\" },\n },\n Aws = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderAwsArgs\n {\n AccountId = \"999999999999\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tDisplayName: pulumi.String(\"Name of provider\"),\n\t\t\tDescription: pulumi.String(\"AWS identity pool provider for automated test\"),\n\t\t\tDisabled: pulumi.Bool(true),\n\t\t\tAttributeCondition: pulumi.String(\"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.arn\"),\n\t\t\t\t\"attribute.aws_account\": pulumi.String(\"assertion.account\"),\n\t\t\t\t\"attribute.environment\": pulumi.String(\"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\"),\n\t\t\t},\n\t\t\tAws: \u0026iam.WorkloadIdentityPoolProviderAwsArgs{\n\t\t\t\tAccountId: pulumi.String(\"999999999999\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderAwsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .displayName(\"Name of provider\")\n .description(\"AWS identity pool provider for automated test\")\n .disabled(true)\n .attributeCondition(\"attribute.aws_role==\\\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\\\"\")\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"assertion.arn\"),\n Map.entry(\"attribute.aws_account\", \"assertion.account\"),\n Map.entry(\"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\")\n ))\n .aws(WorkloadIdentityPoolProviderAwsArgs.builder()\n .accountId(\"999999999999\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n displayName: Name of provider\n description: AWS identity pool provider for automated test\n disabled: true\n attributeCondition: attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"\n attributeMapping:\n google.subject: assertion.arn\n attribute.aws_account: assertion.account\n attribute.environment: 'assertion.arn.contains(\":instance-profile/Production\") ? 
\"prod\" : \"test\"'\n aws:\n accountId: '999999999999'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Oidc Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n attributeMapping: {\n \"google.subject\": \"assertion.sub\",\n },\n oidc: {\n issuerUri: \"https://sts.windows.net/azure-tenant-id\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n attribute_mapping={\n \"google.subject\": \"assertion.sub\",\n },\n oidc={\n \"issuer_uri\": \"https://sts.windows.net/azure-tenant-id\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.sub\" },\n },\n Oidc = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderOidcArgs\n {\n IssuerUri = \"https://sts.windows.net/azure-tenant-id\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.sub\"),\n\t\t\t},\n\t\t\tOidc: \u0026iam.WorkloadIdentityPoolProviderOidcArgs{\n\t\t\t\tIssuerUri: pulumi.String(\"https://sts.windows.net/azure-tenant-id\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderOidcArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) 
{\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .attributeMapping(Map.of(\"google.subject\", \"assertion.sub\"))\n .oidc(WorkloadIdentityPoolProviderOidcArgs.builder()\n .issuerUri(\"https://sts.windows.net/azure-tenant-id\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n attributeMapping:\n google.subject: assertion.sub\n oidc:\n issuerUri: https://sts.windows.net/azure-tenant-id\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Oidc Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n displayName: \"Name of provider\",\n description: \"OIDC identity pool provider for automated test\",\n disabled: true,\n attributeCondition: \"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n attributeMapping: {\n \"google.subject\": \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\",\n \"attribute.tid\": \"assertion.tid\",\n \"attribute.managed_identity_name\": ` {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n`,\n },\n oidc: {\n allowedAudiences: [\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n ],\n issuerUri: \"https://sts.windows.net/azure-tenant-id\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n display_name=\"Name of provider\",\n description=\"OIDC identity pool provider for automated test\",\n disabled=True,\n attribute_condition=\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n attribute_mapping={\n \"google.subject\": \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\",\n \"attribute.tid\": \"assertion.tid\",\n \"attribute.managed_identity_name\": \"\"\" {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n\"\"\",\n },\n oidc={\n \"allowed_audiences\": [\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n ],\n \"issuer_uri\": \"https://sts.windows.net/azure-tenant-id\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await 
Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n DisplayName = \"Name of provider\",\n Description = \"OIDC identity pool provider for automated test\",\n Disabled = true,\n AttributeCondition = \"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n AttributeMapping = \n {\n { \"google.subject\", \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\" },\n { \"attribute.tid\", \"assertion.tid\" },\n { \"attribute.managed_identity_name\", @\" {\n \"\"8bb39bdb-1cc5-4447-b7db-a19e920eb111\"\":\"\"workload1\"\",\n \"\"55d36609-9bcf-48e0-a366-a3cf19027d2a\"\":\"\"workload2\"\"\n }[assertion.oid]\n\" },\n },\n Oidc = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderOidcArgs\n {\n AllowedAudiences = new[]\n {\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n },\n IssuerUri = \"https://sts.windows.net/azure-tenant-id\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tDisplayName: pulumi.String(\"Name of provider\"),\n\t\t\tDescription: pulumi.String(\"OIDC identity pool provider for automated test\"),\n\t\t\tDisabled: pulumi.Bool(true),\n\t\t\tAttributeCondition: pulumi.String(\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\"),\n\t\t\t\t\"attribute.tid\": pulumi.String(\"assertion.tid\"),\n\t\t\t\t\"attribute.managed_identity_name\": pulumi.String(\" {\\n \\\"8bb39bdb-1cc5-4447-b7db-a19e920eb111\\\":\\\"workload1\\\",\\n \\\"55d36609-9bcf-48e0-a366-a3cf19027d2a\\\":\\\"workload2\\\"\\n }[assertion.oid]\\n\"),\n\t\t\t},\n\t\t\tOidc: \u0026iam.WorkloadIdentityPoolProviderOidcArgs{\n\t\t\t\tAllowedAudiences: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"https://example.com/gcp-oidc-federation\"),\n\t\t\t\t\tpulumi.String(\"example.com/gcp-oidc-federation\"),\n\t\t\t\t},\n\t\t\t\tIssuerUri: pulumi.String(\"https://sts.windows.net/azure-tenant-id\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderOidcArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport 
java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .displayName(\"Name of provider\")\n .description(\"OIDC identity pool provider for automated test\")\n .disabled(true)\n .attributeCondition(\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\")\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\"),\n Map.entry(\"attribute.tid\", \"assertion.tid\"),\n Map.entry(\"attribute.managed_identity_name\", \"\"\"\n {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n \"\"\")\n ))\n .oidc(WorkloadIdentityPoolProviderOidcArgs.builder()\n .allowedAudiences( \n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\")\n .issuerUri(\"https://sts.windows.net/azure-tenant-id\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n displayName: Name of provider\n description: OIDC identity pool provider for automated test\n disabled: true\n attributeCondition: '\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups'\n attributeMapping:\n google.subject: '\"azure::\" + assertion.tid + \"::\" + assertion.sub'\n attribute.tid: assertion.tid\n attribute.managed_identity_name: |2\n {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n oidc:\n allowedAudiences:\n - https://example.com/gcp-oidc-federation\n - example.com/gcp-oidc-federation\n issuerUri: https://sts.windows.net/azure-tenant-id\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Saml Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as std from \"@pulumi/std\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n attributeMapping: {\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\",\n },\n saml: {\n idpMetadataXml: std.file({\n input: \"test-fixtures/metadata.xml\",\n }).then(invoke =\u003e invoke.result),\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_std as std\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n attribute_mapping={\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\",\n },\n saml={\n \"idp_metadata_xml\": std.file(input=\"test-fixtures/metadata.xml\").result,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Std = Pulumi.Std;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.arn\" },\n { \"attribute.aws_account\", \"assertion.account\" },\n { \"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\" },\n },\n Saml = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderSamlArgs\n {\n IdpMetadataXml = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/metadata.xml\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi-std/sdk/go/std\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/metadata.xml\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.arn\"),\n\t\t\t\t\"attribute.aws_account\": pulumi.String(\"assertion.account\"),\n\t\t\t\t\"attribute.environment\": pulumi.String(\"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\"),\n\t\t\t},\n\t\t\tSaml: \u0026iam.WorkloadIdentityPoolProviderSamlArgs{\n\t\t\t\tIdpMetadataXml: pulumi.String(invokeFile.Result),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderSamlArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"assertion.arn\"),\n Map.entry(\"attribute.aws_account\", \"assertion.account\"),\n Map.entry(\"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\")\n ))\n .saml(WorkloadIdentityPoolProviderSamlArgs.builder()\n .idpMetadataXml(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/metadata.xml\")\n .build()).result())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n attributeMapping:\n google.subject: assertion.arn\n attribute.aws_account: assertion.account\n attribute.environment: 'assertion.arn.contains(\":instance-profile/Production\") ? \"prod\" : \"test\"'\n saml:\n idpMetadataXml:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/metadata.xml\n Return: result\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Saml Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as std from \"@pulumi/std\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n displayName: \"Name of provider\",\n description: \"SAML 2.0 identity pool provider for automated test\",\n disabled: true,\n attributeMapping: {\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\",\n },\n saml: {\n idpMetadataXml: std.file({\n input: \"test-fixtures/metadata.xml\",\n }).then(invoke =\u003e invoke.result),\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_std as std\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n display_name=\"Name of provider\",\n description=\"SAML 2.0 identity pool provider for automated test\",\n disabled=True,\n attribute_mapping={\n \"google.subject\": \"assertion.arn\",\n \"attribute.aws_account\": \"assertion.account\",\n \"attribute.environment\": \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\",\n },\n saml={\n \"idp_metadata_xml\": std.file(input=\"test-fixtures/metadata.xml\").result,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Std = Pulumi.Std;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n DisplayName = \"Name of provider\",\n Description = \"SAML 2.0 identity pool provider for automated test\",\n Disabled = true,\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.arn\" },\n { \"attribute.aws_account\", \"assertion.account\" },\n { \"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\" },\n },\n Saml = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderSamlArgs\n {\n IdpMetadataXml = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/metadata.xml\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi-std/sdk/go/std\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/metadata.xml\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tDisplayName: pulumi.String(\"Name of provider\"),\n\t\t\tDescription: pulumi.String(\"SAML 2.0 identity pool provider for automated test\"),\n\t\t\tDisabled: pulumi.Bool(true),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.arn\"),\n\t\t\t\t\"attribute.aws_account\": pulumi.String(\"assertion.account\"),\n\t\t\t\t\"attribute.environment\": pulumi.String(\"assertion.arn.contains(\\\":instance-profile/Production\\\") ? 
\\\"prod\\\" : \\\"test\\\"\"),\n\t\t\t},\n\t\t\tSaml: \u0026iam.WorkloadIdentityPoolProviderSamlArgs{\n\t\t\t\tIdpMetadataXml: pulumi.String(invokeFile.Result),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderSamlArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .displayName(\"Name of provider\")\n .description(\"SAML 2.0 identity pool provider for automated test\")\n .disabled(true)\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"assertion.arn\"),\n Map.entry(\"attribute.aws_account\", \"assertion.account\"),\n Map.entry(\"attribute.environment\", \"assertion.arn.contains(\\\":instance-profile/Production\\\") ? \\\"prod\\\" : \\\"test\\\"\")\n ))\n .saml(WorkloadIdentityPoolProviderSamlArgs.builder()\n .idpMetadataXml(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/metadata.xml\")\n .build()).result())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n displayName: Name of provider\n description: SAML 2.0 identity pool provider for automated test\n disabled: true\n attributeMapping:\n google.subject: assertion.arn\n attribute.aws_account: assertion.account\n attribute.environment: 'assertion.arn.contains(\":instance-profile/Production\") ? 
\"prod\" : \"test\"'\n saml:\n idpMetadataXml:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/metadata.xml\n Return: result\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider Oidc Upload Key\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n displayName: \"Name of provider\",\n description: \"OIDC identity pool provider for automated test\",\n disabled: true,\n attributeCondition: \"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n attributeMapping: {\n \"google.subject\": \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\",\n \"attribute.tid\": \"assertion.tid\",\n \"attribute.managed_identity_name\": ` {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n`,\n },\n oidc: {\n allowedAudiences: [\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n ],\n issuerUri: \"https://sts.windows.net/azure-tenant-id\",\n jwksJson: \"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n display_name=\"Name of provider\",\n description=\"OIDC identity pool provider for automated test\",\n disabled=True,\n attribute_condition=\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n attribute_mapping={\n \"google.subject\": \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\",\n \"attribute.tid\": \"assertion.tid\",\n \"attribute.managed_identity_name\": \"\"\" {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n\"\"\",\n },\n oidc={\n \"allowed_audiences\": [\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n ],\n \"issuer_uri\": \"https://sts.windows.net/azure-tenant-id\",\n \"jwks_json\": 
\"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n DisplayName = \"Name of provider\",\n Description = \"OIDC identity pool provider for automated test\",\n Disabled = true,\n AttributeCondition = \"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\",\n AttributeMapping = \n {\n { \"google.subject\", \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\" },\n { \"attribute.tid\", \"assertion.tid\" },\n { \"attribute.managed_identity_name\", @\" {\n \"\"8bb39bdb-1cc5-4447-b7db-a19e920eb111\"\":\"\"workload1\"\",\n \"\"55d36609-9bcf-48e0-a366-a3cf19027d2a\"\":\"\"workload2\"\"\n }[assertion.oid]\n\" },\n },\n Oidc = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderOidcArgs\n {\n AllowedAudiences = new[]\n {\n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\",\n },\n IssuerUri = \"https://sts.windows.net/azure-tenant-id\",\n JwksJson = \"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tDisplayName: pulumi.String(\"Name of provider\"),\n\t\t\tDescription: pulumi.String(\"OIDC identity pool provider for automated test\"),\n\t\t\tDisabled: pulumi.Bool(true),\n\t\t\tAttributeCondition: pulumi.String(\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + 
assertion.sub\"),\n\t\t\t\t\"attribute.tid\": pulumi.String(\"assertion.tid\"),\n\t\t\t\t\"attribute.managed_identity_name\": pulumi.String(\" {\\n \\\"8bb39bdb-1cc5-4447-b7db-a19e920eb111\\\":\\\"workload1\\\",\\n \\\"55d36609-9bcf-48e0-a366-a3cf19027d2a\\\":\\\"workload2\\\"\\n }[assertion.oid]\\n\"),\n\t\t\t},\n\t\t\tOidc: \u0026iam.WorkloadIdentityPoolProviderOidcArgs{\n\t\t\t\tAllowedAudiences: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"https://example.com/gcp-oidc-federation\"),\n\t\t\t\t\tpulumi.String(\"example.com/gcp-oidc-federation\"),\n\t\t\t\t},\n\t\t\t\tIssuerUri: pulumi.String(\"https://sts.windows.net/azure-tenant-id\"),\n\t\t\t\tJwksJson: pulumi.String(\"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderOidcArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .displayName(\"Name of provider\")\n .description(\"OIDC identity pool provider for automated test\")\n .disabled(true)\n .attributeCondition(\"\\\"e968c2ef-047c-498d-8d79-16ca1b61e77e\\\" in assertion.groups\")\n .attributeMapping(Map.ofEntries(\n Map.entry(\"google.subject\", \"\\\"azure::\\\" + assertion.tid + \\\"::\\\" + assertion.sub\"),\n Map.entry(\"attribute.tid\", \"assertion.tid\"),\n Map.entry(\"attribute.managed_identity_name\", \"\"\"\n {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n \"\"\")\n ))\n .oidc(WorkloadIdentityPoolProviderOidcArgs.builder()\n .allowedAudiences( \n \"https://example.com/gcp-oidc-federation\",\n \"example.com/gcp-oidc-federation\")\n .issuerUri(\"https://sts.windows.net/azure-tenant-id\")\n 
.jwksJson(\"{\\\"keys\\\":[{\\\"kty\\\":\\\"RSA\\\",\\\"alg\\\":\\\"RS256\\\",\\\"kid\\\":\\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\\",\\\"use\\\":\\\"sig\\\",\\\"e\\\":\\\"AQAB\\\",\\\"n\\\":\\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\\"}]}\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n displayName: Name of provider\n description: OIDC identity pool provider for automated test\n disabled: true\n attributeCondition: '\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups'\n attributeMapping:\n google.subject: '\"azure::\" + assertion.tid + \"::\" + assertion.sub'\n attribute.tid: assertion.tid\n attribute.managed_identity_name: |2\n {\n \"8bb39bdb-1cc5-4447-b7db-a19e920eb111\":\"workload1\",\n \"55d36609-9bcf-48e0-a366-a3cf19027d2a\":\"workload2\"\n }[assertion.oid]\n oidc:\n allowedAudiences:\n - https://example.com/gcp-oidc-federation\n - example.com/gcp-oidc-federation\n issuerUri: https://sts.windows.net/azure-tenant-id\n jwksJson: '{\"keys\":[{\"kty\":\"RSA\",\"alg\":\"RS256\",\"kid\":\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\",\"use\":\"sig\",\"e\":\"AQAB\",\"n\":\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\"}]}'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider X509 Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as std from \"@pulumi/std\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n attributeMapping: {\n \"google.subject\": \"assertion.subject.dn.cn\",\n },\n x509: {\n trustStore: {\n trustAnchors: [{\n pemCertificate: std.file({\n input: \"test-fixtures/trust_anchor.pem\",\n }).then(invoke =\u003e invoke.result),\n }],\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_std as std\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n attribute_mapping={\n \"google.subject\": \"assertion.subject.dn.cn\",\n },\n x509={\n \"trust_store\": {\n \"trust_anchors\": [{\n \"pem_certificate\": std.file(input=\"test-fixtures/trust_anchor.pem\").result,\n }],\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing 
Gcp = Pulumi.Gcp;\nusing Std = Pulumi.Std;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.subject.dn.cn\" },\n },\n X509 = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509Args\n {\n TrustStore = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreArgs\n {\n TrustAnchors = new[]\n {\n new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs\n {\n PemCertificate = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/trust_anchor.pem\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi-std/sdk/go/std\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/trust_anchor.pem\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.subject.dn.cn\"),\n\t\t\t},\n\t\t\tX509: \u0026iam.WorkloadIdentityPoolProviderX509Args{\n\t\t\t\tTrustStore: \u0026iam.WorkloadIdentityPoolProviderX509TrustStoreArgs{\n\t\t\t\t\tTrustAnchors: iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray{\n\t\t\t\t\t\t\u0026iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{\n\t\t\t\t\t\t\tPemCertificate: pulumi.String(invokeFile.Result),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509Args;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n 
.workloadIdentityPoolProviderId(\"example-prvdr\")\n .attributeMapping(Map.of(\"google.subject\", \"assertion.subject.dn.cn\"))\n .x509(WorkloadIdentityPoolProviderX509Args.builder()\n .trustStore(WorkloadIdentityPoolProviderX509TrustStoreArgs.builder()\n .trustAnchors(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.builder()\n .pemCertificate(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/trust_anchor.pem\")\n .build()).result())\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n attributeMapping:\n google.subject: assertion.subject.dn.cn\n x509:\n trustStore:\n trustAnchors:\n - pemCertificate:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/trust_anchor.pem\n Return: result\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Iam Workload Identity Pool Provider X509 Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as std from \"@pulumi/std\";\n\nconst pool = new gcp.iam.WorkloadIdentityPool(\"pool\", {workloadIdentityPoolId: \"example-pool\"});\nconst example = new gcp.iam.WorkloadIdentityPoolProvider(\"example\", {\n workloadIdentityPoolId: pool.workloadIdentityPoolId,\n workloadIdentityPoolProviderId: \"example-prvdr\",\n displayName: \"Name of provider\",\n description: \"X.509 identity pool provider for automated test\",\n disabled: true,\n attributeMapping: {\n \"google.subject\": \"assertion.subject.dn.cn\",\n },\n x509: {\n trustStore: {\n trustAnchors: [{\n pemCertificate: std.file({\n input: \"test-fixtures/trust_anchor.pem\",\n }).then(invoke =\u003e invoke.result),\n }],\n intermediateCas: [{\n pemCertificate: std.file({\n input: \"test-fixtures/intermediate_ca.pem\",\n }).then(invoke =\u003e invoke.result),\n }],\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_std as std\n\npool = gcp.iam.WorkloadIdentityPool(\"pool\", workload_identity_pool_id=\"example-pool\")\nexample = gcp.iam.WorkloadIdentityPoolProvider(\"example\",\n workload_identity_pool_id=pool.workload_identity_pool_id,\n workload_identity_pool_provider_id=\"example-prvdr\",\n display_name=\"Name of provider\",\n description=\"X.509 identity pool provider for automated test\",\n disabled=True,\n attribute_mapping={\n \"google.subject\": \"assertion.subject.dn.cn\",\n },\n x509={\n \"trust_store\": {\n \"trust_anchors\": [{\n \"pem_certificate\": std.file(input=\"test-fixtures/trust_anchor.pem\").result,\n }],\n \"intermediate_cas\": [{\n \"pem_certificate\": std.file(input=\"test-fixtures/intermediate_ca.pem\").result,\n }],\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Std = Pulumi.Std;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var pool = new Gcp.Iam.WorkloadIdentityPool(\"pool\", new()\n {\n WorkloadIdentityPoolId = \"example-pool\",\n });\n\n var example = new Gcp.Iam.WorkloadIdentityPoolProvider(\"example\", new()\n {\n WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId,\n WorkloadIdentityPoolProviderId = \"example-prvdr\",\n DisplayName = \"Name of provider\",\n Description = \"X.509 identity pool provider for automated 
test\",\n Disabled = true,\n AttributeMapping = \n {\n { \"google.subject\", \"assertion.subject.dn.cn\" },\n },\n X509 = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509Args\n {\n TrustStore = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreArgs\n {\n TrustAnchors = new[]\n {\n new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs\n {\n PemCertificate = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/trust_anchor.pem\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n },\n IntermediateCas = new[]\n {\n new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs\n {\n PemCertificate = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/intermediate_ca.pem\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam\"\n\t\"github.com/pulumi/pulumi-std/sdk/go/std\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tpool, err := iam.NewWorkloadIdentityPool(ctx, \"pool\", \u0026iam.WorkloadIdentityPoolArgs{\n\t\t\tWorkloadIdentityPoolId: pulumi.String(\"example-pool\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/trust_anchor.pem\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile1, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/intermediate_ca.pem\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = iam.NewWorkloadIdentityPoolProvider(ctx, \"example\", \u0026iam.WorkloadIdentityPoolProviderArgs{\n\t\t\tWorkloadIdentityPoolId: pool.WorkloadIdentityPoolId,\n\t\t\tWorkloadIdentityPoolProviderId: pulumi.String(\"example-prvdr\"),\n\t\t\tDisplayName: pulumi.String(\"Name of provider\"),\n\t\t\tDescription: pulumi.String(\"X.509 identity pool provider for automated test\"),\n\t\t\tDisabled: pulumi.Bool(true),\n\t\t\tAttributeMapping: pulumi.StringMap{\n\t\t\t\t\"google.subject\": pulumi.String(\"assertion.subject.dn.cn\"),\n\t\t\t},\n\t\t\tX509: \u0026iam.WorkloadIdentityPoolProviderX509Args{\n\t\t\t\tTrustStore: \u0026iam.WorkloadIdentityPoolProviderX509TrustStoreArgs{\n\t\t\t\t\tTrustAnchors: iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray{\n\t\t\t\t\t\t\u0026iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{\n\t\t\t\t\t\t\tPemCertificate: pulumi.String(invokeFile.Result),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIntermediateCas: iam.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray{\n\t\t\t\t\t\t\u0026iam.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs{\n\t\t\t\t\t\t\tPemCertificate: pulumi.String(invokeFile1.Result),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.iam.WorkloadIdentityPool;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;\nimport com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509Args;\nimport com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport 
java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var pool = new WorkloadIdentityPool(\"pool\", WorkloadIdentityPoolArgs.builder()\n .workloadIdentityPoolId(\"example-pool\")\n .build());\n\n var example = new WorkloadIdentityPoolProvider(\"example\", WorkloadIdentityPoolProviderArgs.builder()\n .workloadIdentityPoolId(pool.workloadIdentityPoolId())\n .workloadIdentityPoolProviderId(\"example-prvdr\")\n .displayName(\"Name of provider\")\n .description(\"X.509 identity pool provider for automated test\")\n .disabled(true)\n .attributeMapping(Map.of(\"google.subject\", \"assertion.subject.dn.cn\"))\n .x509(WorkloadIdentityPoolProviderX509Args.builder()\n .trustStore(WorkloadIdentityPoolProviderX509TrustStoreArgs.builder()\n .trustAnchors(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.builder()\n .pemCertificate(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/trust_anchor.pem\")\n .build()).result())\n .build())\n .intermediateCas(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.builder()\n .pemCertificate(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/intermediate_ca.pem\")\n .build()).result())\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n pool:\n type: gcp:iam:WorkloadIdentityPool\n properties:\n workloadIdentityPoolId: example-pool\n example:\n type: gcp:iam:WorkloadIdentityPoolProvider\n properties:\n workloadIdentityPoolId: ${pool.workloadIdentityPoolId}\n workloadIdentityPoolProviderId: example-prvdr\n displayName: Name of provider\n description: X.509 identity pool provider for automated test\n disabled: true\n attributeMapping:\n google.subject: assertion.subject.dn.cn\n x509:\n trustStore:\n trustAnchors:\n - pemCertificate:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/trust_anchor.pem\n Return: result\n intermediateCas:\n - pemCertificate:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/intermediate_ca.pem\n Return: result\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nWorkloadIdentityPoolProvider can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}`\n\n* `{{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}`\n\n* `{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}`\n\nWhen using the `pulumi import` command, WorkloadIdentityPoolProvider can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}}\n```\n\n```sh\n$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}\n```\n\n```sh\n$ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}}\n```\n\n", "properties": { "attributeCondition": { "type": "string", @@ -204416,6 +205984,10 @@ "workloadIdentityPoolProviderId": { "type": "string", "description": "The ID for the provider, which becomes the final component of the resource name. This\nvalue must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix\n`gcp-` is reserved for use by Google, and may not be specified.\n\n\n- - -\n" + }, + "x509": { + "$ref": "#/types/gcp:iam/WorkloadIdentityPoolProviderX509:WorkloadIdentityPoolProviderX509", + "description": "An X.509-type identity provider represents a CA. It is trusted to assert a\nclient identity if the client has a certificate that chains up to this CA.\nStructure is documented below.\n" } }, "required": [ @@ -204475,6 +206047,10 @@ "type": "string", "description": "The ID for the provider, which becomes the final component of the resource name. This\nvalue must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix\n`gcp-` is reserved for use by Google, and may not be specified.\n\n\n- - -\n", "willReplaceOnChanges": true + }, + "x509": { + "$ref": "#/types/gcp:iam/WorkloadIdentityPoolProviderX509:WorkloadIdentityPoolProviderX509", + "description": "An X.509-type identity provider represents a CA. It is trusted to assert a\nclient identity if the client has a certificate that chains up to this CA.\nStructure is documented below.\n" } }, "requiredInputs": [ @@ -204541,6 +206117,10 @@ "type": "string", "description": "The ID for the provider, which becomes the final component of the resource name. This\nvalue must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix\n`gcp-` is reserved for use by Google, and may not be specified.\n\n\n- - -\n", "willReplaceOnChanges": true + }, + "x509": { + "$ref": "#/types/gcp:iam/WorkloadIdentityPoolProviderX509:WorkloadIdentityPoolProviderX509", + "description": "An X.509-type identity provider represents a CA. 
It is trusted to assert a\nclient identity if the client has a certificate that chains up to this CA.\nStructure is documented below.\n" } }, "type": "object" @@ -209441,7 +211021,7 @@ } }, "gcp:kms/autokeyConfig:AutokeyConfig": { - "description": "## Example Usage\n\n### Kms Autokey Config All\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as time from \"@pulumi/time\";\n\n// Create Folder in GCP Organization\nconst autokmsFolder = new gcp.organizations.Folder(\"autokms_folder\", {\n displayName: \"my-folder\",\n parent: \"organizations/123456789\",\n deletionProtection: false,\n});\n// Create the key project\nconst keyProject = new gcp.organizations.Project(\"key_project\", {\n projectId: \"key-proj\",\n name: \"key-proj\",\n folderId: autokmsFolder.folderId,\n billingAccount: \"000000-0000000-0000000-000000\",\n deletionPolicy: \"DELETE\",\n}, {\n dependsOn: [autokmsFolder],\n});\n// Enable the Cloud KMS API\nconst kmsApiService = new gcp.projects.Service(\"kms_api_service\", {\n service: \"cloudkms.googleapis.com\",\n project: keyProject.projectId,\n disableOnDestroy: false,\n disableDependentServices: true,\n}, {\n dependsOn: [keyProject],\n});\n// Wait delay after enabling APIs\nconst waitEnableServiceApi = new time.index.Sleep(\"wait_enable_service_api\", {createDuration: \"30s\"}, {\n dependsOn: [kmsApiService],\n});\n//Create KMS Service Agent\nconst kmsServiceAgent = new gcp.projects.ServiceIdentity(\"kms_service_agent\", {\n service: \"cloudkms.googleapis.com\",\n project: keyProject.number,\n}, {\n dependsOn: [waitEnableServiceApi],\n});\n// Wait delay after creating service agent.\nconst waitServiceAgent = new time.index.Sleep(\"wait_service_agent\", {createDuration: \"10s\"}, {\n dependsOn: [kmsServiceAgent],\n});\n//Grant the KMS Service Agent the Cloud KMS Admin role\nconst autokeyProjectAdmin = new gcp.projects.IAMMember(\"autokey_project_admin\", {\n project: keyProject.projectId,\n role: \"roles/cloudkms.admin\",\n member: pulumi.interpolate`serviceAccount:service-${keyProject.number}@gcp-sa-cloudkms.iam.gserviceaccount.com`,\n}, {\n dependsOn: [waitServiceAgent],\n});\n// Wait delay after granting IAM permissions\nconst waitSrvAccPermissions = new time.index.Sleep(\"wait_srv_acc_permissions\", {createDuration: \"10s\"}, {\n dependsOn: [autokeyProjectAdmin],\n});\nconst example_autokeyconfig = new gcp.kms.AutokeyConfig(\"example-autokeyconfig\", {\n folder: autokmsFolder.folderId,\n keyProject: pulumi.interpolate`projects/${keyProject.projectId}`,\n}, {\n dependsOn: [waitSrvAccPermissions],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_time as time\n\n# Create Folder in GCP Organization\nautokms_folder = gcp.organizations.Folder(\"autokms_folder\",\n display_name=\"my-folder\",\n parent=\"organizations/123456789\",\n deletion_protection=False)\n# Create the key project\nkey_project = gcp.organizations.Project(\"key_project\",\n project_id=\"key-proj\",\n name=\"key-proj\",\n folder_id=autokms_folder.folder_id,\n billing_account=\"000000-0000000-0000000-000000\",\n deletion_policy=\"DELETE\",\n opts = pulumi.ResourceOptions(depends_on=[autokms_folder]))\n# Enable the Cloud KMS API\nkms_api_service = gcp.projects.Service(\"kms_api_service\",\n service=\"cloudkms.googleapis.com\",\n project=key_project.project_id,\n disable_on_destroy=False,\n disable_dependent_services=True,\n opts = pulumi.ResourceOptions(depends_on=[key_project]))\n# 
Wait delay after enabling APIs\nwait_enable_service_api = time.index.Sleep(\"wait_enable_service_api\", create_duration=30s,\nopts = pulumi.ResourceOptions(depends_on=[kms_api_service]))\n#Create KMS Service Agent\nkms_service_agent = gcp.projects.ServiceIdentity(\"kms_service_agent\",\n service=\"cloudkms.googleapis.com\",\n project=key_project.number,\n opts = pulumi.ResourceOptions(depends_on=[wait_enable_service_api]))\n# Wait delay after creating service agent.\nwait_service_agent = time.index.Sleep(\"wait_service_agent\", create_duration=10s,\nopts = pulumi.ResourceOptions(depends_on=[kms_service_agent]))\n#Grant the KMS Service Agent the Cloud KMS Admin role\nautokey_project_admin = gcp.projects.IAMMember(\"autokey_project_admin\",\n project=key_project.project_id,\n role=\"roles/cloudkms.admin\",\n member=key_project.number.apply(lambda number: f\"serviceAccount:service-{number}@gcp-sa-cloudkms.iam.gserviceaccount.com\"),\n opts = pulumi.ResourceOptions(depends_on=[wait_service_agent]))\n# Wait delay after granting IAM permissions\nwait_srv_acc_permissions = time.index.Sleep(\"wait_srv_acc_permissions\", create_duration=10s,\nopts = pulumi.ResourceOptions(depends_on=[autokey_project_admin]))\nexample_autokeyconfig = gcp.kms.AutokeyConfig(\"example-autokeyconfig\",\n folder=autokms_folder.folder_id,\n key_project=key_project.project_id.apply(lambda project_id: f\"projects/{project_id}\"),\n opts = pulumi.ResourceOptions(depends_on=[wait_srv_acc_permissions]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Time = Pulumi.Time;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n // Create Folder in GCP Organization\n var autokmsFolder = new Gcp.Organizations.Folder(\"autokms_folder\", new()\n {\n DisplayName = \"my-folder\",\n Parent = \"organizations/123456789\",\n DeletionProtection = false,\n });\n\n // Create the key project\n var keyProject = new Gcp.Organizations.Project(\"key_project\", new()\n {\n ProjectId = \"key-proj\",\n Name = \"key-proj\",\n FolderId = autokmsFolder.FolderId,\n BillingAccount = \"000000-0000000-0000000-000000\",\n DeletionPolicy = \"DELETE\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n autokmsFolder,\n },\n });\n\n // Enable the Cloud KMS API\n var kmsApiService = new Gcp.Projects.Service(\"kms_api_service\", new()\n {\n ServiceName = \"cloudkms.googleapis.com\",\n Project = keyProject.ProjectId,\n DisableOnDestroy = false,\n DisableDependentServices = true,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n keyProject,\n },\n });\n\n // Wait delay after enabling APIs\n var waitEnableServiceApi = new Time.Index.Sleep(\"wait_enable_service_api\", new()\n {\n CreateDuration = \"30s\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n kmsApiService,\n },\n });\n\n //Create KMS Service Agent\n var kmsServiceAgent = new Gcp.Projects.ServiceIdentity(\"kms_service_agent\", new()\n {\n Service = \"cloudkms.googleapis.com\",\n Project = keyProject.Number,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n waitEnableServiceApi,\n },\n });\n\n // Wait delay after creating service agent.\n var waitServiceAgent = new Time.Index.Sleep(\"wait_service_agent\", new()\n {\n CreateDuration = \"10s\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n kmsServiceAgent,\n },\n });\n\n //Grant the KMS Service Agent the Cloud KMS Admin role\n var autokeyProjectAdmin = new Gcp.Projects.IAMMember(\"autokey_project_admin\", new()\n {\n Project = keyProject.ProjectId,\n Role = 
\"roles/cloudkms.admin\",\n Member = keyProject.Number.Apply(number =\u003e $\"serviceAccount:service-{number}@gcp-sa-cloudkms.iam.gserviceaccount.com\"),\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n waitServiceAgent,\n },\n });\n\n // Wait delay after granting IAM permissions\n var waitSrvAccPermissions = new Time.Index.Sleep(\"wait_srv_acc_permissions\", new()\n {\n CreateDuration = \"10s\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n autokeyProjectAdmin,\n },\n });\n\n var example_autokeyconfig = new Gcp.Kms.AutokeyConfig(\"example-autokeyconfig\", new()\n {\n Folder = autokmsFolder.FolderId,\n KeyProject = keyProject.ProjectId.Apply(projectId =\u003e $\"projects/{projectId}\"),\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n waitSrvAccPermissions,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi-time/sdk/go/time\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t// Create Folder in GCP Organization\n\t\tautokmsFolder, err := organizations.NewFolder(ctx, \"autokms_folder\", \u0026organizations.FolderArgs{\n\t\t\tDisplayName: pulumi.String(\"my-folder\"),\n\t\t\tParent: pulumi.String(\"organizations/123456789\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Create the key project\n\t\tkeyProject, err := organizations.NewProject(ctx, \"key_project\", \u0026organizations.ProjectArgs{\n\t\t\tProjectId: pulumi.String(\"key-proj\"),\n\t\t\tName: pulumi.String(\"key-proj\"),\n\t\t\tFolderId: autokmsFolder.FolderId,\n\t\t\tBillingAccount: pulumi.String(\"000000-0000000-0000000-000000\"),\n\t\t\tDeletionPolicy: pulumi.String(\"DELETE\"),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tautokmsFolder,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Enable the Cloud KMS API\n\t\tkmsApiService, err := projects.NewService(ctx, \"kms_api_service\", \u0026projects.ServiceArgs{\n\t\t\tService: pulumi.String(\"cloudkms.googleapis.com\"),\n\t\t\tProject: keyProject.ProjectId,\n\t\t\tDisableOnDestroy: pulumi.Bool(false),\n\t\t\tDisableDependentServices: pulumi.Bool(true),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tkeyProject,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait delay after enabling APIs\n\t\twaitEnableServiceApi, err := time.NewSleep(ctx, \"wait_enable_service_api\", \u0026time.SleepArgs{\n\t\t\tCreateDuration: \"30s\",\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tkmsApiService,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Create KMS Service Agent\n\t\tkmsServiceAgent, err := projects.NewServiceIdentity(ctx, \"kms_service_agent\", \u0026projects.ServiceIdentityArgs{\n\t\t\tService: pulumi.String(\"cloudkms.googleapis.com\"),\n\t\t\tProject: keyProject.Number,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\twaitEnableServiceApi,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait delay after creating service agent.\n\t\twaitServiceAgent, err := time.NewSleep(ctx, \"wait_service_agent\", \u0026time.SleepArgs{\n\t\t\tCreateDuration: \"10s\",\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tkmsServiceAgent,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Grant the KMS Service Agent the Cloud KMS Admin 
role\n\t\tautokeyProjectAdmin, err := projects.NewIAMMember(ctx, \"autokey_project_admin\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: keyProject.ProjectId,\n\t\t\tRole: pulumi.String(\"roles/cloudkms.admin\"),\n\t\t\tMember: keyProject.Number.ApplyT(func(number string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"serviceAccount:service-%v@gcp-sa-cloudkms.iam.gserviceaccount.com\", number), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\twaitServiceAgent,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait delay after granting IAM permissions\n\t\twaitSrvAccPermissions, err := time.NewSleep(ctx, \"wait_srv_acc_permissions\", \u0026time.SleepArgs{\n\t\t\tCreateDuration: \"10s\",\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tautokeyProjectAdmin,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = kms.NewAutokeyConfig(ctx, \"example-autokeyconfig\", \u0026kms.AutokeyConfigArgs{\n\t\t\tFolder: autokmsFolder.FolderId,\n\t\t\tKeyProject: keyProject.ProjectId.ApplyT(func(projectId string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"projects/%v\", projectId), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\twaitSrvAccPermissions,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Folder;\nimport com.pulumi.gcp.organizations.FolderArgs;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport com.pulumi.gcp.projects.Service;\nimport com.pulumi.gcp.projects.ServiceArgs;\nimport com.pulumi.time.sleep;\nimport com.pulumi.time.SleepArgs;\nimport com.pulumi.gcp.projects.ServiceIdentity;\nimport com.pulumi.gcp.projects.ServiceIdentityArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.kms.AutokeyConfig;\nimport com.pulumi.gcp.kms.AutokeyConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n // Create Folder in GCP Organization\n var autokmsFolder = new Folder(\"autokmsFolder\", FolderArgs.builder()\n .displayName(\"my-folder\")\n .parent(\"organizations/123456789\")\n .deletionProtection(false)\n .build());\n\n // Create the key project\n var keyProject = new Project(\"keyProject\", ProjectArgs.builder()\n .projectId(\"key-proj\")\n .name(\"key-proj\")\n .folderId(autokmsFolder.folderId())\n .billingAccount(\"000000-0000000-0000000-000000\")\n .deletionPolicy(\"DELETE\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(autokmsFolder)\n .build());\n\n // Enable the Cloud KMS API\n var kmsApiService = new Service(\"kmsApiService\", ServiceArgs.builder()\n .service(\"cloudkms.googleapis.com\")\n .project(keyProject.projectId())\n .disableOnDestroy(false)\n .disableDependentServices(true)\n .build(), CustomResourceOptions.builder()\n .dependsOn(keyProject)\n .build());\n\n // Wait delay after enabling APIs\n var waitEnableServiceApi = new Sleep(\"waitEnableServiceApi\", SleepArgs.builder()\n .createDuration(\"30s\")\n .build(), CustomResourceOptions.builder()\n 
.dependsOn(kmsApiService)\n .build());\n\n //Create KMS Service Agent\n var kmsServiceAgent = new ServiceIdentity(\"kmsServiceAgent\", ServiceIdentityArgs.builder()\n .service(\"cloudkms.googleapis.com\")\n .project(keyProject.number())\n .build(), CustomResourceOptions.builder()\n .dependsOn(waitEnableServiceApi)\n .build());\n\n // Wait delay after creating service agent.\n var waitServiceAgent = new Sleep(\"waitServiceAgent\", SleepArgs.builder()\n .createDuration(\"10s\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(kmsServiceAgent)\n .build());\n\n //Grant the KMS Service Agent the Cloud KMS Admin role\n var autokeyProjectAdmin = new IAMMember(\"autokeyProjectAdmin\", IAMMemberArgs.builder()\n .project(keyProject.projectId())\n .role(\"roles/cloudkms.admin\")\n .member(keyProject.number().applyValue(number -\u003e String.format(\"serviceAccount:service-%s@gcp-sa-cloudkms.iam.gserviceaccount.com\", number)))\n .build(), CustomResourceOptions.builder()\n .dependsOn(waitServiceAgent)\n .build());\n\n // Wait delay after granting IAM permissions\n var waitSrvAccPermissions = new Sleep(\"waitSrvAccPermissions\", SleepArgs.builder()\n .createDuration(\"10s\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(autokeyProjectAdmin)\n .build());\n\n var example_autokeyconfig = new AutokeyConfig(\"example-autokeyconfig\", AutokeyConfigArgs.builder()\n .folder(autokmsFolder.folderId())\n .keyProject(keyProject.projectId().applyValue(projectId -\u003e String.format(\"projects/%s\", projectId)))\n .build(), CustomResourceOptions.builder()\n .dependsOn(waitSrvAccPermissions)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # Create Folder in GCP Organization\n autokmsFolder:\n type: gcp:organizations:Folder\n name: autokms_folder\n properties:\n displayName: my-folder\n parent: organizations/123456789\n deletionProtection: false\n # Create the key project\n keyProject:\n type: gcp:organizations:Project\n name: key_project\n properties:\n projectId: key-proj\n name: key-proj\n folderId: ${autokmsFolder.folderId}\n billingAccount: 000000-0000000-0000000-000000\n deletionPolicy: DELETE\n options:\n dependson:\n - ${autokmsFolder}\n # Enable the Cloud KMS API\n kmsApiService:\n type: gcp:projects:Service\n name: kms_api_service\n properties:\n service: cloudkms.googleapis.com\n project: ${keyProject.projectId}\n disableOnDestroy: false\n disableDependentServices: true\n options:\n dependson:\n - ${keyProject}\n # Wait delay after enabling APIs\n waitEnableServiceApi:\n type: time:sleep\n name: wait_enable_service_api\n properties:\n createDuration: 30s\n options:\n dependson:\n - ${kmsApiService}\n #Create KMS Service Agent\n kmsServiceAgent:\n type: gcp:projects:ServiceIdentity\n name: kms_service_agent\n properties:\n service: cloudkms.googleapis.com\n project: ${keyProject.number}\n options:\n dependson:\n - ${waitEnableServiceApi}\n # Wait delay after creating service agent.\n waitServiceAgent:\n type: time:sleep\n name: wait_service_agent\n properties:\n createDuration: 10s\n options:\n dependson:\n - ${kmsServiceAgent}\n #Grant the KMS Service Agent the Cloud KMS Admin role\n autokeyProjectAdmin:\n type: gcp:projects:IAMMember\n name: autokey_project_admin\n properties:\n project: ${keyProject.projectId}\n role: roles/cloudkms.admin\n member: serviceAccount:service-${keyProject.number}@gcp-sa-cloudkms.iam.gserviceaccount.com\n options:\n dependson:\n - ${waitServiceAgent}\n # Wait delay after granting IAM permissions\n waitSrvAccPermissions:\n type: time:sleep\n name: 
wait_srv_acc_permissions\n properties:\n createDuration: 10s\n options:\n dependson:\n - ${autokeyProjectAdmin}\n example-autokeyconfig:\n type: gcp:kms:AutokeyConfig\n properties:\n folder: ${autokmsFolder.folderId}\n keyProject: projects/${keyProject.projectId}\n options:\n dependson:\n - ${waitSrvAccPermissions}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nAutokeyConfig can be imported using any of these accepted formats:\n\n* `folders/{{folder}}/autokeyConfig`\n\n* `{{folder}}`\n\nWhen using the `pulumi import` command, AutokeyConfig can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:kms/autokeyConfig:AutokeyConfig default folders/{{folder}}/autokeyConfig\n```\n\n```sh\n$ pulumi import gcp:kms/autokeyConfig:AutokeyConfig default {{folder}}\n```\n\n", + "description": "## Example Usage\n\n### Kms Autokey Config All\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as time from \"@pulumi/time\";\n\n// Create Folder in GCP Organization\nconst autokmsFolder = new gcp.organizations.Folder(\"autokms_folder\", {\n displayName: \"my-folder\",\n parent: \"organizations/123456789\",\n deletionProtection: false,\n});\n// Create the key project\nconst keyProject = new gcp.organizations.Project(\"key_project\", {\n projectId: \"key-proj\",\n name: \"key-proj\",\n folderId: autokmsFolder.folderId,\n billingAccount: \"000000-0000000-0000000-000000\",\n deletionPolicy: \"DELETE\",\n}, {\n dependsOn: [autokmsFolder],\n});\n// Enable the Cloud KMS API\nconst kmsApiService = new gcp.projects.Service(\"kms_api_service\", {\n service: \"cloudkms.googleapis.com\",\n project: keyProject.projectId,\n disableOnDestroy: false,\n disableDependentServices: true,\n}, {\n dependsOn: [keyProject],\n});\n// Wait delay after enabling APIs\nconst waitEnableServiceApi = new time.index.Sleep(\"wait_enable_service_api\", {createDuration: \"30s\"}, {\n dependsOn: [kmsApiService],\n});\n//Create KMS Service Agent\nconst kmsServiceAgent = new gcp.projects.ServiceIdentity(\"kms_service_agent\", {\n service: \"cloudkms.googleapis.com\",\n project: keyProject.number,\n}, {\n dependsOn: [waitEnableServiceApi],\n});\n// Wait delay after creating service agent.\nconst waitServiceAgent = new time.index.Sleep(\"wait_service_agent\", {createDuration: \"10s\"}, {\n dependsOn: [kmsServiceAgent],\n});\n//Grant the KMS Service Agent the Cloud KMS Admin role\nconst autokeyProjectAdmin = new gcp.projects.IAMMember(\"autokey_project_admin\", {\n project: keyProject.projectId,\n role: \"roles/cloudkms.admin\",\n member: pulumi.interpolate`serviceAccount:service-${keyProject.number}@gcp-sa-cloudkms.iam.gserviceaccount.com`,\n}, {\n dependsOn: [waitServiceAgent],\n});\n// Wait delay after granting IAM permissions\nconst waitSrvAccPermissions = new time.index.Sleep(\"wait_srv_acc_permissions\", {createDuration: \"10s\"}, {\n dependsOn: [autokeyProjectAdmin],\n});\nconst example_autokeyconfig = new gcp.kms.AutokeyConfig(\"example-autokeyconfig\", {\n folder: autokmsFolder.id,\n keyProject: pulumi.interpolate`projects/${keyProject.projectId}`,\n}, {\n dependsOn: [waitSrvAccPermissions],\n});\n// Wait delay after setting AutokeyConfig, to prevent diffs on reapply,\n// because setting the config takes a little to fully propagate.\nconst waitAutokeyPropagation = new time.index.Sleep(\"wait_autokey_propagation\", {createDuration: \"30s\"}, {\n dependsOn: 
[example_autokeyconfig],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_time as time\n\n# Create Folder in GCP Organization\nautokms_folder = gcp.organizations.Folder(\"autokms_folder\",\n display_name=\"my-folder\",\n parent=\"organizations/123456789\",\n deletion_protection=False)\n# Create the key project\nkey_project = gcp.organizations.Project(\"key_project\",\n project_id=\"key-proj\",\n name=\"key-proj\",\n folder_id=autokms_folder.folder_id,\n billing_account=\"000000-0000000-0000000-000000\",\n deletion_policy=\"DELETE\",\n opts = pulumi.ResourceOptions(depends_on=[autokms_folder]))\n# Enable the Cloud KMS API\nkms_api_service = gcp.projects.Service(\"kms_api_service\",\n service=\"cloudkms.googleapis.com\",\n project=key_project.project_id,\n disable_on_destroy=False,\n disable_dependent_services=True,\n opts = pulumi.ResourceOptions(depends_on=[key_project]))\n# Wait delay after enabling APIs\nwait_enable_service_api = time.index.Sleep(\"wait_enable_service_api\", create_duration=30s,\nopts = pulumi.ResourceOptions(depends_on=[kms_api_service]))\n#Create KMS Service Agent\nkms_service_agent = gcp.projects.ServiceIdentity(\"kms_service_agent\",\n service=\"cloudkms.googleapis.com\",\n project=key_project.number,\n opts = pulumi.ResourceOptions(depends_on=[wait_enable_service_api]))\n# Wait delay after creating service agent.\nwait_service_agent = time.index.Sleep(\"wait_service_agent\", create_duration=10s,\nopts = pulumi.ResourceOptions(depends_on=[kms_service_agent]))\n#Grant the KMS Service Agent the Cloud KMS Admin role\nautokey_project_admin = gcp.projects.IAMMember(\"autokey_project_admin\",\n project=key_project.project_id,\n role=\"roles/cloudkms.admin\",\n member=key_project.number.apply(lambda number: f\"serviceAccount:service-{number}@gcp-sa-cloudkms.iam.gserviceaccount.com\"),\n opts = pulumi.ResourceOptions(depends_on=[wait_service_agent]))\n# Wait delay after granting IAM permissions\nwait_srv_acc_permissions = time.index.Sleep(\"wait_srv_acc_permissions\", create_duration=10s,\nopts = pulumi.ResourceOptions(depends_on=[autokey_project_admin]))\nexample_autokeyconfig = gcp.kms.AutokeyConfig(\"example-autokeyconfig\",\n folder=autokms_folder.id,\n key_project=key_project.project_id.apply(lambda project_id: f\"projects/{project_id}\"),\n opts = pulumi.ResourceOptions(depends_on=[wait_srv_acc_permissions]))\n# Wait delay after setting AutokeyConfig, to prevent diffs on reapply,\n# because setting the config takes a little to fully propagate.\nwait_autokey_propagation = time.index.Sleep(\"wait_autokey_propagation\", create_duration=30s,\nopts = pulumi.ResourceOptions(depends_on=[example_autokeyconfig]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Time = Pulumi.Time;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n // Create Folder in GCP Organization\n var autokmsFolder = new Gcp.Organizations.Folder(\"autokms_folder\", new()\n {\n DisplayName = \"my-folder\",\n Parent = \"organizations/123456789\",\n DeletionProtection = false,\n });\n\n // Create the key project\n var keyProject = new Gcp.Organizations.Project(\"key_project\", new()\n {\n ProjectId = \"key-proj\",\n Name = \"key-proj\",\n FolderId = autokmsFolder.FolderId,\n BillingAccount = \"000000-0000000-0000000-000000\",\n DeletionPolicy = \"DELETE\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n autokmsFolder,\n },\n });\n\n // Enable the Cloud KMS API\n var kmsApiService = new 
Gcp.Projects.Service(\"kms_api_service\", new()\n {\n ServiceName = \"cloudkms.googleapis.com\",\n Project = keyProject.ProjectId,\n DisableOnDestroy = false,\n DisableDependentServices = true,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n keyProject,\n },\n });\n\n // Wait delay after enabling APIs\n var waitEnableServiceApi = new Time.Index.Sleep(\"wait_enable_service_api\", new()\n {\n CreateDuration = \"30s\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n kmsApiService,\n },\n });\n\n //Create KMS Service Agent\n var kmsServiceAgent = new Gcp.Projects.ServiceIdentity(\"kms_service_agent\", new()\n {\n Service = \"cloudkms.googleapis.com\",\n Project = keyProject.Number,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n waitEnableServiceApi,\n },\n });\n\n // Wait delay after creating service agent.\n var waitServiceAgent = new Time.Index.Sleep(\"wait_service_agent\", new()\n {\n CreateDuration = \"10s\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n kmsServiceAgent,\n },\n });\n\n //Grant the KMS Service Agent the Cloud KMS Admin role\n var autokeyProjectAdmin = new Gcp.Projects.IAMMember(\"autokey_project_admin\", new()\n {\n Project = keyProject.ProjectId,\n Role = \"roles/cloudkms.admin\",\n Member = keyProject.Number.Apply(number =\u003e $\"serviceAccount:service-{number}@gcp-sa-cloudkms.iam.gserviceaccount.com\"),\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n waitServiceAgent,\n },\n });\n\n // Wait delay after granting IAM permissions\n var waitSrvAccPermissions = new Time.Index.Sleep(\"wait_srv_acc_permissions\", new()\n {\n CreateDuration = \"10s\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n autokeyProjectAdmin,\n },\n });\n\n var example_autokeyconfig = new Gcp.Kms.AutokeyConfig(\"example-autokeyconfig\", new()\n {\n Folder = autokmsFolder.Id,\n KeyProject = keyProject.ProjectId.Apply(projectId =\u003e $\"projects/{projectId}\"),\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n waitSrvAccPermissions,\n },\n });\n\n // Wait delay after setting AutokeyConfig, to prevent diffs on reapply,\n // because setting the config takes a little to fully propagate.\n var waitAutokeyPropagation = new Time.Index.Sleep(\"wait_autokey_propagation\", new()\n {\n CreateDuration = \"30s\",\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n example_autokeyconfig,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi-time/sdk/go/time\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t// Create Folder in GCP Organization\n\t\tautokmsFolder, err := organizations.NewFolder(ctx, \"autokms_folder\", \u0026organizations.FolderArgs{\n\t\t\tDisplayName: pulumi.String(\"my-folder\"),\n\t\t\tParent: pulumi.String(\"organizations/123456789\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Create the key project\n\t\tkeyProject, err := organizations.NewProject(ctx, \"key_project\", \u0026organizations.ProjectArgs{\n\t\t\tProjectId: pulumi.String(\"key-proj\"),\n\t\t\tName: pulumi.String(\"key-proj\"),\n\t\t\tFolderId: autokmsFolder.FolderId,\n\t\t\tBillingAccount: pulumi.String(\"000000-0000000-0000000-000000\"),\n\t\t\tDeletionPolicy: pulumi.String(\"DELETE\"),\n\t\t}, 
pulumi.DependsOn([]pulumi.Resource{\n\t\t\tautokmsFolder,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Enable the Cloud KMS API\n\t\tkmsApiService, err := projects.NewService(ctx, \"kms_api_service\", \u0026projects.ServiceArgs{\n\t\t\tService: pulumi.String(\"cloudkms.googleapis.com\"),\n\t\t\tProject: keyProject.ProjectId,\n\t\t\tDisableOnDestroy: pulumi.Bool(false),\n\t\t\tDisableDependentServices: pulumi.Bool(true),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tkeyProject,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait delay after enabling APIs\n\t\twaitEnableServiceApi, err := time.NewSleep(ctx, \"wait_enable_service_api\", \u0026time.SleepArgs{\n\t\t\tCreateDuration: \"30s\",\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tkmsApiService,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Create KMS Service Agent\n\t\tkmsServiceAgent, err := projects.NewServiceIdentity(ctx, \"kms_service_agent\", \u0026projects.ServiceIdentityArgs{\n\t\t\tService: pulumi.String(\"cloudkms.googleapis.com\"),\n\t\t\tProject: keyProject.Number,\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\twaitEnableServiceApi,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait delay after creating service agent.\n\t\twaitServiceAgent, err := time.NewSleep(ctx, \"wait_service_agent\", \u0026time.SleepArgs{\n\t\t\tCreateDuration: \"10s\",\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tkmsServiceAgent,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Grant the KMS Service Agent the Cloud KMS Admin role\n\t\tautokeyProjectAdmin, err := projects.NewIAMMember(ctx, \"autokey_project_admin\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: keyProject.ProjectId,\n\t\t\tRole: pulumi.String(\"roles/cloudkms.admin\"),\n\t\t\tMember: keyProject.Number.ApplyT(func(number string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"serviceAccount:service-%v@gcp-sa-cloudkms.iam.gserviceaccount.com\", number), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\twaitServiceAgent,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait delay after granting IAM permissions\n\t\twaitSrvAccPermissions, err := time.NewSleep(ctx, \"wait_srv_acc_permissions\", \u0026time.SleepArgs{\n\t\t\tCreateDuration: \"10s\",\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tautokeyProjectAdmin,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texample_autokeyconfig, err := kms.NewAutokeyConfig(ctx, \"example-autokeyconfig\", \u0026kms.AutokeyConfigArgs{\n\t\t\tFolder: autokmsFolder.ID(),\n\t\t\tKeyProject: keyProject.ProjectId.ApplyT(func(projectId string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"projects/%v\", projectId), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\twaitSrvAccPermissions,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Wait delay after setting AutokeyConfig, to prevent diffs on reapply,\n\t\t// because setting the config takes a little to fully propagate.\n\t\t_, err = time.NewSleep(ctx, \"wait_autokey_propagation\", \u0026time.SleepArgs{\n\t\t\tCreateDuration: \"30s\",\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\texample_autokeyconfig,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Folder;\nimport 
com.pulumi.gcp.organizations.FolderArgs;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport com.pulumi.gcp.projects.Service;\nimport com.pulumi.gcp.projects.ServiceArgs;\nimport com.pulumi.time.sleep;\nimport com.pulumi.time.SleepArgs;\nimport com.pulumi.gcp.projects.ServiceIdentity;\nimport com.pulumi.gcp.projects.ServiceIdentityArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.kms.AutokeyConfig;\nimport com.pulumi.gcp.kms.AutokeyConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n // Create Folder in GCP Organization\n var autokmsFolder = new Folder(\"autokmsFolder\", FolderArgs.builder()\n .displayName(\"my-folder\")\n .parent(\"organizations/123456789\")\n .deletionProtection(false)\n .build());\n\n // Create the key project\n var keyProject = new Project(\"keyProject\", ProjectArgs.builder()\n .projectId(\"key-proj\")\n .name(\"key-proj\")\n .folderId(autokmsFolder.folderId())\n .billingAccount(\"000000-0000000-0000000-000000\")\n .deletionPolicy(\"DELETE\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(autokmsFolder)\n .build());\n\n // Enable the Cloud KMS API\n var kmsApiService = new Service(\"kmsApiService\", ServiceArgs.builder()\n .service(\"cloudkms.googleapis.com\")\n .project(keyProject.projectId())\n .disableOnDestroy(false)\n .disableDependentServices(true)\n .build(), CustomResourceOptions.builder()\n .dependsOn(keyProject)\n .build());\n\n // Wait delay after enabling APIs\n var waitEnableServiceApi = new Sleep(\"waitEnableServiceApi\", SleepArgs.builder()\n .createDuration(\"30s\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(kmsApiService)\n .build());\n\n //Create KMS Service Agent\n var kmsServiceAgent = new ServiceIdentity(\"kmsServiceAgent\", ServiceIdentityArgs.builder()\n .service(\"cloudkms.googleapis.com\")\n .project(keyProject.number())\n .build(), CustomResourceOptions.builder()\n .dependsOn(waitEnableServiceApi)\n .build());\n\n // Wait delay after creating service agent.\n var waitServiceAgent = new Sleep(\"waitServiceAgent\", SleepArgs.builder()\n .createDuration(\"10s\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(kmsServiceAgent)\n .build());\n\n //Grant the KMS Service Agent the Cloud KMS Admin role\n var autokeyProjectAdmin = new IAMMember(\"autokeyProjectAdmin\", IAMMemberArgs.builder()\n .project(keyProject.projectId())\n .role(\"roles/cloudkms.admin\")\n .member(keyProject.number().applyValue(number -\u003e String.format(\"serviceAccount:service-%s@gcp-sa-cloudkms.iam.gserviceaccount.com\", number)))\n .build(), CustomResourceOptions.builder()\n .dependsOn(waitServiceAgent)\n .build());\n\n // Wait delay after granting IAM permissions\n var waitSrvAccPermissions = new Sleep(\"waitSrvAccPermissions\", SleepArgs.builder()\n .createDuration(\"10s\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(autokeyProjectAdmin)\n .build());\n\n var example_autokeyconfig = new AutokeyConfig(\"example-autokeyconfig\", AutokeyConfigArgs.builder()\n .folder(autokmsFolder.id())\n .keyProject(keyProject.projectId().applyValue(projectId -\u003e String.format(\"projects/%s\", projectId)))\n .build(), 
CustomResourceOptions.builder()\n .dependsOn(waitSrvAccPermissions)\n .build());\n\n // Wait delay after setting AutokeyConfig, to prevent diffs on reapply,\n // because setting the config takes a little to fully propagate.\n var waitAutokeyPropagation = new Sleep(\"waitAutokeyPropagation\", SleepArgs.builder()\n .createDuration(\"30s\")\n .build(), CustomResourceOptions.builder()\n .dependsOn(example_autokeyconfig)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # Create Folder in GCP Organization\n autokmsFolder:\n type: gcp:organizations:Folder\n name: autokms_folder\n properties:\n displayName: my-folder\n parent: organizations/123456789\n deletionProtection: false\n # Create the key project\n keyProject:\n type: gcp:organizations:Project\n name: key_project\n properties:\n projectId: key-proj\n name: key-proj\n folderId: ${autokmsFolder.folderId}\n billingAccount: 000000-0000000-0000000-000000\n deletionPolicy: DELETE\n options:\n dependson:\n - ${autokmsFolder}\n # Enable the Cloud KMS API\n kmsApiService:\n type: gcp:projects:Service\n name: kms_api_service\n properties:\n service: cloudkms.googleapis.com\n project: ${keyProject.projectId}\n disableOnDestroy: false\n disableDependentServices: true\n options:\n dependson:\n - ${keyProject}\n # Wait delay after enabling APIs\n waitEnableServiceApi:\n type: time:sleep\n name: wait_enable_service_api\n properties:\n createDuration: 30s\n options:\n dependson:\n - ${kmsApiService}\n #Create KMS Service Agent\n kmsServiceAgent:\n type: gcp:projects:ServiceIdentity\n name: kms_service_agent\n properties:\n service: cloudkms.googleapis.com\n project: ${keyProject.number}\n options:\n dependson:\n - ${waitEnableServiceApi}\n # Wait delay after creating service agent.\n waitServiceAgent:\n type: time:sleep\n name: wait_service_agent\n properties:\n createDuration: 10s\n options:\n dependson:\n - ${kmsServiceAgent}\n #Grant the KMS Service Agent the Cloud KMS Admin role\n autokeyProjectAdmin:\n type: gcp:projects:IAMMember\n name: autokey_project_admin\n properties:\n project: ${keyProject.projectId}\n role: roles/cloudkms.admin\n member: serviceAccount:service-${keyProject.number}@gcp-sa-cloudkms.iam.gserviceaccount.com\n options:\n dependson:\n - ${waitServiceAgent}\n # Wait delay after granting IAM permissions\n waitSrvAccPermissions:\n type: time:sleep\n name: wait_srv_acc_permissions\n properties:\n createDuration: 10s\n options:\n dependson:\n - ${autokeyProjectAdmin}\n example-autokeyconfig:\n type: gcp:kms:AutokeyConfig\n properties:\n folder: ${autokmsFolder.id}\n keyProject: projects/${keyProject.projectId}\n options:\n dependson:\n - ${waitSrvAccPermissions}\n # Wait delay after setting AutokeyConfig, to prevent diffs on reapply,\n # because setting the config takes a little to fully propagate.\n waitAutokeyPropagation:\n type: time:sleep\n name: wait_autokey_propagation\n properties:\n createDuration: 30s\n options:\n dependson:\n - ${[\"example-autokeyconfig\"]}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nAutokeyConfig can be imported using any of these accepted formats:\n\n* `folders/{{folder}}/autokeyConfig`\n\n* `{{folder}}`\n\nWhen using the `pulumi import` command, AutokeyConfig can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:kms/autokeyConfig:AutokeyConfig default folders/{{folder}}/autokeyConfig\n```\n\n```sh\n$ pulumi import gcp:kms/autokeyConfig:AutokeyConfig default {{folder}}\n```\n\n", "properties": { "folder": { "type": "string", @@ -216450,7 +218030,7 @@ } }, "gcp:netapp/activeDirectory:ActiveDirectory": { - "description": "ActiveDirectory is the public representation of the active directory config.\n\n\nTo get more information about activeDirectory, see:\n\n* [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.activeDirectories)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/netapp/volumes/docs/configure-and-use/active-directory/about-ad)\n\n\n\n## Example Usage\n\n### Netapp Active Directory Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst testActiveDirectoryFull = new gcp.netapp.ActiveDirectory(\"test_active_directory_full\", {\n name: \"test-active-directory-full\",\n location: \"us-central1\",\n domain: \"ad.internal\",\n dns: \"172.30.64.3\",\n netBiosPrefix: \"smbserver\",\n username: \"user\",\n password: \"pass\",\n aesEncryption: false,\n backupOperators: [\n \"test1\",\n \"test2\",\n ],\n administrators: [\n \"test1\",\n \"test2\",\n ],\n description: \"ActiveDirectory is the public representation of the active directory config.\",\n encryptDcConnections: false,\n kdcHostname: \"hostname\",\n kdcIp: \"10.10.0.11\",\n labels: {\n foo: \"bar\",\n },\n ldapSigning: false,\n nfsUsersWithLdap: false,\n organizationalUnit: \"CN=Computers\",\n securityOperators: [\n \"test1\",\n \"test2\",\n ],\n site: \"test-site\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntest_active_directory_full = gcp.netapp.ActiveDirectory(\"test_active_directory_full\",\n name=\"test-active-directory-full\",\n location=\"us-central1\",\n domain=\"ad.internal\",\n dns=\"172.30.64.3\",\n net_bios_prefix=\"smbserver\",\n username=\"user\",\n password=\"pass\",\n aes_encryption=False,\n backup_operators=[\n \"test1\",\n \"test2\",\n ],\n administrators=[\n \"test1\",\n \"test2\",\n ],\n description=\"ActiveDirectory is the public representation of the active directory config.\",\n encrypt_dc_connections=False,\n kdc_hostname=\"hostname\",\n kdc_ip=\"10.10.0.11\",\n labels={\n \"foo\": \"bar\",\n },\n ldap_signing=False,\n nfs_users_with_ldap=False,\n organizational_unit=\"CN=Computers\",\n security_operators=[\n \"test1\",\n \"test2\",\n ],\n site=\"test-site\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var testActiveDirectoryFull = new Gcp.Netapp.ActiveDirectory(\"test_active_directory_full\", new()\n {\n Name = \"test-active-directory-full\",\n Location = \"us-central1\",\n Domain = \"ad.internal\",\n Dns = \"172.30.64.3\",\n NetBiosPrefix = \"smbserver\",\n Username = \"user\",\n Password = \"pass\",\n AesEncryption = false,\n BackupOperators = new[]\n {\n \"test1\",\n \"test2\",\n },\n Administrators = new[]\n {\n \"test1\",\n \"test2\",\n },\n Description = \"ActiveDirectory is the public representation of the active directory config.\",\n EncryptDcConnections = false,\n KdcHostname = \"hostname\",\n KdcIp = \"10.10.0.11\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n LdapSigning = false,\n NfsUsersWithLdap = false,\n OrganizationalUnit = 
\"CN=Computers\",\n SecurityOperators = new[]\n {\n \"test1\",\n \"test2\",\n },\n Site = \"test-site\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := netapp.NewActiveDirectory(ctx, \"test_active_directory_full\", \u0026netapp.ActiveDirectoryArgs{\n\t\t\tName: pulumi.String(\"test-active-directory-full\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDomain: pulumi.String(\"ad.internal\"),\n\t\t\tDns: pulumi.String(\"172.30.64.3\"),\n\t\t\tNetBiosPrefix: pulumi.String(\"smbserver\"),\n\t\t\tUsername: pulumi.String(\"user\"),\n\t\t\tPassword: pulumi.String(\"pass\"),\n\t\t\tAesEncryption: pulumi.Bool(false),\n\t\t\tBackupOperators: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"test1\"),\n\t\t\t\tpulumi.String(\"test2\"),\n\t\t\t},\n\t\t\tAdministrators: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"test1\"),\n\t\t\t\tpulumi.String(\"test2\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"ActiveDirectory is the public representation of the active directory config.\"),\n\t\t\tEncryptDcConnections: pulumi.Bool(false),\n\t\t\tKdcHostname: pulumi.String(\"hostname\"),\n\t\t\tKdcIp: pulumi.String(\"10.10.0.11\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tLdapSigning: pulumi.Bool(false),\n\t\t\tNfsUsersWithLdap: pulumi.Bool(false),\n\t\t\tOrganizationalUnit: pulumi.String(\"CN=Computers\"),\n\t\t\tSecurityOperators: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"test1\"),\n\t\t\t\tpulumi.String(\"test2\"),\n\t\t\t},\n\t\t\tSite: pulumi.String(\"test-site\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.netapp.ActiveDirectory;\nimport com.pulumi.gcp.netapp.ActiveDirectoryArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var testActiveDirectoryFull = new ActiveDirectory(\"testActiveDirectoryFull\", ActiveDirectoryArgs.builder()\n .name(\"test-active-directory-full\")\n .location(\"us-central1\")\n .domain(\"ad.internal\")\n .dns(\"172.30.64.3\")\n .netBiosPrefix(\"smbserver\")\n .username(\"user\")\n .password(\"pass\")\n .aesEncryption(false)\n .backupOperators( \n \"test1\",\n \"test2\")\n .administrators( \n \"test1\",\n \"test2\")\n .description(\"ActiveDirectory is the public representation of the active directory config.\")\n .encryptDcConnections(false)\n .kdcHostname(\"hostname\")\n .kdcIp(\"10.10.0.11\")\n .labels(Map.of(\"foo\", \"bar\"))\n .ldapSigning(false)\n .nfsUsersWithLdap(false)\n .organizationalUnit(\"CN=Computers\")\n .securityOperators( \n \"test1\",\n \"test2\")\n .site(\"test-site\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n testActiveDirectoryFull:\n type: gcp:netapp:ActiveDirectory\n name: test_active_directory_full\n properties:\n name: test-active-directory-full\n location: us-central1\n domain: ad.internal\n dns: 172.30.64.3\n netBiosPrefix: smbserver\n username: user\n password: pass\n aesEncryption: false\n backupOperators:\n - test1\n - test2\n administrators:\n - test1\n - test2\n 
description: ActiveDirectory is the public representation of the active directory config.\n encryptDcConnections: false\n kdcHostname: hostname\n kdcIp: 10.10.0.11\n labels:\n foo: bar\n ldapSigning: false\n nfsUsersWithLdap: false\n organizationalUnit: CN=Computers\n securityOperators:\n - test1\n - test2\n site: test-site\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nactiveDirectory can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, activeDirectory can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default {{location}}/{{name}}\n```\n\n", + "description": "ActiveDirectory is the public representation of the active directory config.\n\n\nTo get more information about ActiveDirectory, see:\n\n* [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.activeDirectories)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/netapp/volumes/docs/configure-and-use/active-directory/about-ad)\n\n\n\n## Example Usage\n\n### Netapp Active Directory Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst testActiveDirectoryFull = new gcp.netapp.ActiveDirectory(\"test_active_directory_full\", {\n name: \"test-active-directory-full\",\n location: \"us-central1\",\n domain: \"ad.internal\",\n dns: \"172.30.64.3\",\n netBiosPrefix: \"smbserver\",\n username: \"user\",\n password: \"pass\",\n aesEncryption: false,\n backupOperators: [\n \"test1\",\n \"test2\",\n ],\n administrators: [\n \"test1\",\n \"test2\",\n ],\n description: \"ActiveDirectory is the public representation of the active directory config.\",\n encryptDcConnections: false,\n kdcHostname: \"hostname\",\n kdcIp: \"10.10.0.11\",\n labels: {\n foo: \"bar\",\n },\n ldapSigning: false,\n nfsUsersWithLdap: false,\n organizationalUnit: \"CN=Computers\",\n securityOperators: [\n \"test1\",\n \"test2\",\n ],\n site: \"test-site\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntest_active_directory_full = gcp.netapp.ActiveDirectory(\"test_active_directory_full\",\n name=\"test-active-directory-full\",\n location=\"us-central1\",\n domain=\"ad.internal\",\n dns=\"172.30.64.3\",\n net_bios_prefix=\"smbserver\",\n username=\"user\",\n password=\"pass\",\n aes_encryption=False,\n backup_operators=[\n \"test1\",\n \"test2\",\n ],\n administrators=[\n \"test1\",\n \"test2\",\n ],\n description=\"ActiveDirectory is the public representation of the active directory config.\",\n encrypt_dc_connections=False,\n kdc_hostname=\"hostname\",\n kdc_ip=\"10.10.0.11\",\n labels={\n \"foo\": \"bar\",\n },\n ldap_signing=False,\n nfs_users_with_ldap=False,\n organizational_unit=\"CN=Computers\",\n security_operators=[\n \"test1\",\n \"test2\",\n ],\n site=\"test-site\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var 
testActiveDirectoryFull = new Gcp.Netapp.ActiveDirectory(\"test_active_directory_full\", new()\n {\n Name = \"test-active-directory-full\",\n Location = \"us-central1\",\n Domain = \"ad.internal\",\n Dns = \"172.30.64.3\",\n NetBiosPrefix = \"smbserver\",\n Username = \"user\",\n Password = \"pass\",\n AesEncryption = false,\n BackupOperators = new[]\n {\n \"test1\",\n \"test2\",\n },\n Administrators = new[]\n {\n \"test1\",\n \"test2\",\n },\n Description = \"ActiveDirectory is the public representation of the active directory config.\",\n EncryptDcConnections = false,\n KdcHostname = \"hostname\",\n KdcIp = \"10.10.0.11\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n LdapSigning = false,\n NfsUsersWithLdap = false,\n OrganizationalUnit = \"CN=Computers\",\n SecurityOperators = new[]\n {\n \"test1\",\n \"test2\",\n },\n Site = \"test-site\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := netapp.NewActiveDirectory(ctx, \"test_active_directory_full\", \u0026netapp.ActiveDirectoryArgs{\n\t\t\tName: pulumi.String(\"test-active-directory-full\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDomain: pulumi.String(\"ad.internal\"),\n\t\t\tDns: pulumi.String(\"172.30.64.3\"),\n\t\t\tNetBiosPrefix: pulumi.String(\"smbserver\"),\n\t\t\tUsername: pulumi.String(\"user\"),\n\t\t\tPassword: pulumi.String(\"pass\"),\n\t\t\tAesEncryption: pulumi.Bool(false),\n\t\t\tBackupOperators: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"test1\"),\n\t\t\t\tpulumi.String(\"test2\"),\n\t\t\t},\n\t\t\tAdministrators: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"test1\"),\n\t\t\t\tpulumi.String(\"test2\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"ActiveDirectory is the public representation of the active directory config.\"),\n\t\t\tEncryptDcConnections: pulumi.Bool(false),\n\t\t\tKdcHostname: pulumi.String(\"hostname\"),\n\t\t\tKdcIp: pulumi.String(\"10.10.0.11\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tLdapSigning: pulumi.Bool(false),\n\t\t\tNfsUsersWithLdap: pulumi.Bool(false),\n\t\t\tOrganizationalUnit: pulumi.String(\"CN=Computers\"),\n\t\t\tSecurityOperators: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"test1\"),\n\t\t\t\tpulumi.String(\"test2\"),\n\t\t\t},\n\t\t\tSite: pulumi.String(\"test-site\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.netapp.ActiveDirectory;\nimport com.pulumi.gcp.netapp.ActiveDirectoryArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var testActiveDirectoryFull = new ActiveDirectory(\"testActiveDirectoryFull\", ActiveDirectoryArgs.builder()\n .name(\"test-active-directory-full\")\n .location(\"us-central1\")\n .domain(\"ad.internal\")\n .dns(\"172.30.64.3\")\n .netBiosPrefix(\"smbserver\")\n .username(\"user\")\n .password(\"pass\")\n .aesEncryption(false)\n .backupOperators( \n \"test1\",\n \"test2\")\n .administrators( \n \"test1\",\n \"test2\")\n .description(\"ActiveDirectory is the public 
representation of the active directory config.\")\n .encryptDcConnections(false)\n .kdcHostname(\"hostname\")\n .kdcIp(\"10.10.0.11\")\n .labels(Map.of(\"foo\", \"bar\"))\n .ldapSigning(false)\n .nfsUsersWithLdap(false)\n .organizationalUnit(\"CN=Computers\")\n .securityOperators( \n \"test1\",\n \"test2\")\n .site(\"test-site\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n testActiveDirectoryFull:\n type: gcp:netapp:ActiveDirectory\n name: test_active_directory_full\n properties:\n name: test-active-directory-full\n location: us-central1\n domain: ad.internal\n dns: 172.30.64.3\n netBiosPrefix: smbserver\n username: user\n password: pass\n aesEncryption: false\n backupOperators:\n - test1\n - test2\n administrators:\n - test1\n - test2\n description: ActiveDirectory is the public representation of the active directory config.\n encryptDcConnections: false\n kdcHostname: hostname\n kdcIp: 10.10.0.11\n labels:\n foo: bar\n ldapSigning: false\n nfsUsersWithLdap: false\n organizationalUnit: CN=Computers\n securityOperators:\n - test1\n - test2\n site: test-site\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nActiveDirectory can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, ActiveDirectory can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default {{location}}/{{name}}\n```\n\n", "properties": { "administrators": { "type": "array", @@ -216837,7 +218417,7 @@ } }, "gcp:netapp/backup:Backup": { - "description": "NetApp Volumes supports volume backups, which are copies of your volumes\nstored independently from the volume. Backups are stored in backup vaults,\nwhich are containers for backups. If a volume is lost or deleted, you can\nuse backups to restore your data to a new volume.\n\nWhen you create the first backup of a volume, all of the volume's used\ndata is sent to the backup vault. Subsequent backups of the same volume\nonly include data that has changed from the previous backup. This allows\nfor fast incremental-forever backups and reduces the required capacity\ninside the backup vault.\n\nYou can create manual and scheduled backups. Manual backups can be taken\nfrom a volume or from an existing volume snapshot. 
Scheduled backups\nrequire a backup policy.\n\n\nTo get more information about backup, see:\n\n* [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults.backups)\n* How-to Guides\n * [Documentation](https://cloud.google.com/netapp/volumes/docs/protect-data/about-volume-backups)\n\n## Example Usage\n\n### Netapp Backup\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst default = gcp.compute.getNetwork({\n name: \"\",\n});\nconst defaultStoragePool = new gcp.netapp.StoragePool(\"default\", {\n name: \"backup-pool\",\n location: \"us-central1\",\n serviceLevel: \"PREMIUM\",\n capacityGib: \"2048\",\n network: _default.then(_default =\u003e _default.id),\n});\nconst defaultBackupVault = new gcp.netapp.BackupVault(\"default\", {\n name: \"backup-vault\",\n location: defaultStoragePool.location,\n});\nconst defaultVolume = new gcp.netapp.Volume(\"default\", {\n name: \"backup-volume\",\n location: defaultStoragePool.location,\n capacityGib: \"100\",\n shareName: \"backup-volume\",\n storagePool: defaultStoragePool.name,\n protocols: [\"NFSV3\"],\n deletionPolicy: \"FORCE\",\n backupConfig: {\n backupVault: defaultBackupVault.id,\n },\n});\nconst testBackup = new gcp.netapp.Backup(\"test_backup\", {\n name: \"test-backup\",\n location: defaultBackupVault.location,\n vaultName: defaultBackupVault.name,\n sourceVolume: defaultVolume.id,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.compute.get_network(name=\"\")\ndefault_storage_pool = gcp.netapp.StoragePool(\"default\",\n name=\"backup-pool\",\n location=\"us-central1\",\n service_level=\"PREMIUM\",\n capacity_gib=\"2048\",\n network=default.id)\ndefault_backup_vault = gcp.netapp.BackupVault(\"default\",\n name=\"backup-vault\",\n location=default_storage_pool.location)\ndefault_volume = gcp.netapp.Volume(\"default\",\n name=\"backup-volume\",\n location=default_storage_pool.location,\n capacity_gib=\"100\",\n share_name=\"backup-volume\",\n storage_pool=default_storage_pool.name,\n protocols=[\"NFSV3\"],\n deletion_policy=\"FORCE\",\n backup_config={\n \"backup_vault\": default_backup_vault.id,\n })\ntest_backup = gcp.netapp.Backup(\"test_backup\",\n name=\"test-backup\",\n location=default_backup_vault.location,\n vault_name=default_backup_vault.name,\n source_volume=default_volume.id)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = Gcp.Compute.GetNetwork.Invoke(new()\n {\n Name = \"\",\n });\n\n var defaultStoragePool = new Gcp.Netapp.StoragePool(\"default\", new()\n {\n Name = \"backup-pool\",\n Location = \"us-central1\",\n ServiceLevel = \"PREMIUM\",\n CapacityGib = \"2048\",\n Network = @default.Apply(@default =\u003e @default.Apply(getNetworkResult =\u003e getNetworkResult.Id)),\n });\n\n var defaultBackupVault = new Gcp.Netapp.BackupVault(\"default\", new()\n {\n Name = \"backup-vault\",\n Location = defaultStoragePool.Location,\n });\n\n var defaultVolume = new Gcp.Netapp.Volume(\"default\", new()\n {\n Name = \"backup-volume\",\n Location = defaultStoragePool.Location,\n CapacityGib = \"100\",\n ShareName = \"backup-volume\",\n StoragePool = defaultStoragePool.Name,\n Protocols = new[]\n {\n \"NFSV3\",\n },\n DeletionPolicy = \"FORCE\",\n BackupConfig = new Gcp.Netapp.Inputs.VolumeBackupConfigArgs\n {\n BackupVault 
= defaultBackupVault.Id,\n },\n });\n\n var testBackup = new Gcp.Netapp.Backup(\"test_backup\", new()\n {\n Name = \"test-backup\",\n Location = defaultBackupVault.Location,\n VaultName = defaultBackupVault.Name,\n SourceVolume = defaultVolume.Id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_default, err := compute.LookupNetwork(ctx, \u0026compute.LookupNetworkArgs{\n\t\t\tName: \"\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultStoragePool, err := netapp.NewStoragePool(ctx, \"default\", \u0026netapp.StoragePoolArgs{\n\t\t\tName: pulumi.String(\"backup-pool\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tServiceLevel: pulumi.String(\"PREMIUM\"),\n\t\t\tCapacityGib: pulumi.String(\"2048\"),\n\t\t\tNetwork: pulumi.String(_default.Id),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultBackupVault, err := netapp.NewBackupVault(ctx, \"default\", \u0026netapp.BackupVaultArgs{\n\t\t\tName: pulumi.String(\"backup-vault\"),\n\t\t\tLocation: defaultStoragePool.Location,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultVolume, err := netapp.NewVolume(ctx, \"default\", \u0026netapp.VolumeArgs{\n\t\t\tName: pulumi.String(\"backup-volume\"),\n\t\t\tLocation: defaultStoragePool.Location,\n\t\t\tCapacityGib: pulumi.String(\"100\"),\n\t\t\tShareName: pulumi.String(\"backup-volume\"),\n\t\t\tStoragePool: defaultStoragePool.Name,\n\t\t\tProtocols: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"NFSV3\"),\n\t\t\t},\n\t\t\tDeletionPolicy: pulumi.String(\"FORCE\"),\n\t\t\tBackupConfig: \u0026netapp.VolumeBackupConfigArgs{\n\t\t\t\tBackupVault: defaultBackupVault.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = netapp.NewBackup(ctx, \"test_backup\", \u0026netapp.BackupArgs{\n\t\t\tName: pulumi.String(\"test-backup\"),\n\t\t\tLocation: defaultBackupVault.Location,\n\t\t\tVaultName: defaultBackupVault.Name,\n\t\t\tSourceVolume: defaultVolume.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetNetworkArgs;\nimport com.pulumi.gcp.netapp.StoragePool;\nimport com.pulumi.gcp.netapp.StoragePoolArgs;\nimport com.pulumi.gcp.netapp.BackupVault;\nimport com.pulumi.gcp.netapp.BackupVaultArgs;\nimport com.pulumi.gcp.netapp.Volume;\nimport com.pulumi.gcp.netapp.VolumeArgs;\nimport com.pulumi.gcp.netapp.inputs.VolumeBackupConfigArgs;\nimport com.pulumi.gcp.netapp.Backup;\nimport com.pulumi.gcp.netapp.BackupArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var default = ComputeFunctions.getNetwork(GetNetworkArgs.builder()\n .name(\"\")\n .build());\n\n var defaultStoragePool = new StoragePool(\"defaultStoragePool\", StoragePoolArgs.builder()\n .name(\"backup-pool\")\n .location(\"us-central1\")\n .serviceLevel(\"PREMIUM\")\n .capacityGib(\"2048\")\n .network(default_.id())\n 
.build());\n\n var defaultBackupVault = new BackupVault(\"defaultBackupVault\", BackupVaultArgs.builder()\n .name(\"backup-vault\")\n .location(defaultStoragePool.location())\n .build());\n\n var defaultVolume = new Volume(\"defaultVolume\", VolumeArgs.builder()\n .name(\"backup-volume\")\n .location(defaultStoragePool.location())\n .capacityGib(\"100\")\n .shareName(\"backup-volume\")\n .storagePool(defaultStoragePool.name())\n .protocols(\"NFSV3\")\n .deletionPolicy(\"FORCE\")\n .backupConfig(VolumeBackupConfigArgs.builder()\n .backupVault(defaultBackupVault.id())\n .build())\n .build());\n\n var testBackup = new Backup(\"testBackup\", BackupArgs.builder()\n .name(\"test-backup\")\n .location(defaultBackupVault.location())\n .vaultName(defaultBackupVault.name())\n .sourceVolume(defaultVolume.id())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n defaultStoragePool:\n type: gcp:netapp:StoragePool\n name: default\n properties:\n name: backup-pool\n location: us-central1\n serviceLevel: PREMIUM\n capacityGib: '2048'\n network: ${default.id}\n defaultVolume:\n type: gcp:netapp:Volume\n name: default\n properties:\n name: backup-volume\n location: ${defaultStoragePool.location}\n capacityGib: '100'\n shareName: backup-volume\n storagePool: ${defaultStoragePool.name}\n protocols:\n - NFSV3\n deletionPolicy: FORCE\n backupConfig:\n backupVault: ${defaultBackupVault.id}\n defaultBackupVault:\n type: gcp:netapp:BackupVault\n name: default\n properties:\n name: backup-vault\n location: ${defaultStoragePool.location}\n testBackup:\n type: gcp:netapp:Backup\n name: test_backup\n properties:\n name: test-backup\n location: ${defaultBackupVault.location}\n vaultName: ${defaultBackupVault.name}\n sourceVolume: ${defaultVolume.id}\nvariables:\n default:\n fn::invoke:\n Function: gcp:compute:getNetwork\n Arguments:\n name:\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nbackup can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}`\n\n* `{{project}}/{{location}}/{{vault_name}}/{{name}}`\n\n* `{{location}}/{{vault_name}}/{{name}}`\n\nWhen using the `pulumi import` command, backup can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:netapp/backup:Backup default projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backup:Backup default {{project}}/{{location}}/{{vault_name}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backup:Backup default {{location}}/{{vault_name}}/{{name}}\n```\n\n", + "description": "NetApp Volumes supports volume backups, which are copies of your volumes\nstored independently from the volume. Backups are stored in backup vaults,\nwhich are containers for backups. If a volume is lost or deleted, you can\nuse backups to restore your data to a new volume.\n\nWhen you create the first backup of a volume, all of the volume's used\ndata is sent to the backup vault. Subsequent backups of the same volume\nonly include data that has changed from the previous backup. This allows\nfor fast incremental-forever backups and reduces the required capacity\ninside the backup vault.\n\nYou can create manual and scheduled backups. Manual backups can be taken\nfrom a volume or from an existing volume snapshot. 
Scheduled backups\nrequire a backup policy.\n\n\nTo get more information about Backup, see:\n\n* [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults.backups)\n* How-to Guides\n * [Documentation](https://cloud.google.com/netapp/volumes/docs/protect-data/about-volume-backups)\n\n## Example Usage\n\n### Netapp Backup\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst default = gcp.compute.getNetwork({\n name: \"\",\n});\nconst defaultStoragePool = new gcp.netapp.StoragePool(\"default\", {\n name: \"backup-pool\",\n location: \"us-central1\",\n serviceLevel: \"PREMIUM\",\n capacityGib: \"2048\",\n network: _default.then(_default =\u003e _default.id),\n});\nconst defaultBackupVault = new gcp.netapp.BackupVault(\"default\", {\n name: \"backup-vault\",\n location: defaultStoragePool.location,\n});\nconst defaultVolume = new gcp.netapp.Volume(\"default\", {\n name: \"backup-volume\",\n location: defaultStoragePool.location,\n capacityGib: \"100\",\n shareName: \"backup-volume\",\n storagePool: defaultStoragePool.name,\n protocols: [\"NFSV3\"],\n deletionPolicy: \"FORCE\",\n backupConfig: {\n backupVault: defaultBackupVault.id,\n },\n});\nconst testBackup = new gcp.netapp.Backup(\"test_backup\", {\n name: \"test-backup\",\n location: defaultBackupVault.location,\n vaultName: defaultBackupVault.name,\n sourceVolume: defaultVolume.id,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.compute.get_network(name=\"\")\ndefault_storage_pool = gcp.netapp.StoragePool(\"default\",\n name=\"backup-pool\",\n location=\"us-central1\",\n service_level=\"PREMIUM\",\n capacity_gib=\"2048\",\n network=default.id)\ndefault_backup_vault = gcp.netapp.BackupVault(\"default\",\n name=\"backup-vault\",\n location=default_storage_pool.location)\ndefault_volume = gcp.netapp.Volume(\"default\",\n name=\"backup-volume\",\n location=default_storage_pool.location,\n capacity_gib=\"100\",\n share_name=\"backup-volume\",\n storage_pool=default_storage_pool.name,\n protocols=[\"NFSV3\"],\n deletion_policy=\"FORCE\",\n backup_config={\n \"backup_vault\": default_backup_vault.id,\n })\ntest_backup = gcp.netapp.Backup(\"test_backup\",\n name=\"test-backup\",\n location=default_backup_vault.location,\n vault_name=default_backup_vault.name,\n source_volume=default_volume.id)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = Gcp.Compute.GetNetwork.Invoke(new()\n {\n Name = \"\",\n });\n\n var defaultStoragePool = new Gcp.Netapp.StoragePool(\"default\", new()\n {\n Name = \"backup-pool\",\n Location = \"us-central1\",\n ServiceLevel = \"PREMIUM\",\n CapacityGib = \"2048\",\n Network = @default.Apply(@default =\u003e @default.Apply(getNetworkResult =\u003e getNetworkResult.Id)),\n });\n\n var defaultBackupVault = new Gcp.Netapp.BackupVault(\"default\", new()\n {\n Name = \"backup-vault\",\n Location = defaultStoragePool.Location,\n });\n\n var defaultVolume = new Gcp.Netapp.Volume(\"default\", new()\n {\n Name = \"backup-volume\",\n Location = defaultStoragePool.Location,\n CapacityGib = \"100\",\n ShareName = \"backup-volume\",\n StoragePool = defaultStoragePool.Name,\n Protocols = new[]\n {\n \"NFSV3\",\n },\n DeletionPolicy = \"FORCE\",\n BackupConfig = new Gcp.Netapp.Inputs.VolumeBackupConfigArgs\n {\n BackupVault 
= defaultBackupVault.Id,\n },\n });\n\n var testBackup = new Gcp.Netapp.Backup(\"test_backup\", new()\n {\n Name = \"test-backup\",\n Location = defaultBackupVault.Location,\n VaultName = defaultBackupVault.Name,\n SourceVolume = defaultVolume.Id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_default, err := compute.LookupNetwork(ctx, \u0026compute.LookupNetworkArgs{\n\t\t\tName: \"\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultStoragePool, err := netapp.NewStoragePool(ctx, \"default\", \u0026netapp.StoragePoolArgs{\n\t\t\tName: pulumi.String(\"backup-pool\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tServiceLevel: pulumi.String(\"PREMIUM\"),\n\t\t\tCapacityGib: pulumi.String(\"2048\"),\n\t\t\tNetwork: pulumi.String(_default.Id),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultBackupVault, err := netapp.NewBackupVault(ctx, \"default\", \u0026netapp.BackupVaultArgs{\n\t\t\tName: pulumi.String(\"backup-vault\"),\n\t\t\tLocation: defaultStoragePool.Location,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultVolume, err := netapp.NewVolume(ctx, \"default\", \u0026netapp.VolumeArgs{\n\t\t\tName: pulumi.String(\"backup-volume\"),\n\t\t\tLocation: defaultStoragePool.Location,\n\t\t\tCapacityGib: pulumi.String(\"100\"),\n\t\t\tShareName: pulumi.String(\"backup-volume\"),\n\t\t\tStoragePool: defaultStoragePool.Name,\n\t\t\tProtocols: pulumi.StringArray{\n\t\t\t\tpulumi.String(\"NFSV3\"),\n\t\t\t},\n\t\t\tDeletionPolicy: pulumi.String(\"FORCE\"),\n\t\t\tBackupConfig: \u0026netapp.VolumeBackupConfigArgs{\n\t\t\t\tBackupVault: defaultBackupVault.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = netapp.NewBackup(ctx, \"test_backup\", \u0026netapp.BackupArgs{\n\t\t\tName: pulumi.String(\"test-backup\"),\n\t\t\tLocation: defaultBackupVault.Location,\n\t\t\tVaultName: defaultBackupVault.Name,\n\t\t\tSourceVolume: defaultVolume.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.ComputeFunctions;\nimport com.pulumi.gcp.compute.inputs.GetNetworkArgs;\nimport com.pulumi.gcp.netapp.StoragePool;\nimport com.pulumi.gcp.netapp.StoragePoolArgs;\nimport com.pulumi.gcp.netapp.BackupVault;\nimport com.pulumi.gcp.netapp.BackupVaultArgs;\nimport com.pulumi.gcp.netapp.Volume;\nimport com.pulumi.gcp.netapp.VolumeArgs;\nimport com.pulumi.gcp.netapp.inputs.VolumeBackupConfigArgs;\nimport com.pulumi.gcp.netapp.Backup;\nimport com.pulumi.gcp.netapp.BackupArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var default = ComputeFunctions.getNetwork(GetNetworkArgs.builder()\n .name(\"\")\n .build());\n\n var defaultStoragePool = new StoragePool(\"defaultStoragePool\", StoragePoolArgs.builder()\n .name(\"backup-pool\")\n .location(\"us-central1\")\n .serviceLevel(\"PREMIUM\")\n .capacityGib(\"2048\")\n .network(default_.id())\n 
.build());\n\n var defaultBackupVault = new BackupVault(\"defaultBackupVault\", BackupVaultArgs.builder()\n .name(\"backup-vault\")\n .location(defaultStoragePool.location())\n .build());\n\n var defaultVolume = new Volume(\"defaultVolume\", VolumeArgs.builder()\n .name(\"backup-volume\")\n .location(defaultStoragePool.location())\n .capacityGib(\"100\")\n .shareName(\"backup-volume\")\n .storagePool(defaultStoragePool.name())\n .protocols(\"NFSV3\")\n .deletionPolicy(\"FORCE\")\n .backupConfig(VolumeBackupConfigArgs.builder()\n .backupVault(defaultBackupVault.id())\n .build())\n .build());\n\n var testBackup = new Backup(\"testBackup\", BackupArgs.builder()\n .name(\"test-backup\")\n .location(defaultBackupVault.location())\n .vaultName(defaultBackupVault.name())\n .sourceVolume(defaultVolume.id())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n defaultStoragePool:\n type: gcp:netapp:StoragePool\n name: default\n properties:\n name: backup-pool\n location: us-central1\n serviceLevel: PREMIUM\n capacityGib: '2048'\n network: ${default.id}\n defaultVolume:\n type: gcp:netapp:Volume\n name: default\n properties:\n name: backup-volume\n location: ${defaultStoragePool.location}\n capacityGib: '100'\n shareName: backup-volume\n storagePool: ${defaultStoragePool.name}\n protocols:\n - NFSV3\n deletionPolicy: FORCE\n backupConfig:\n backupVault: ${defaultBackupVault.id}\n defaultBackupVault:\n type: gcp:netapp:BackupVault\n name: default\n properties:\n name: backup-vault\n location: ${defaultStoragePool.location}\n testBackup:\n type: gcp:netapp:Backup\n name: test_backup\n properties:\n name: test-backup\n location: ${defaultBackupVault.location}\n vaultName: ${defaultBackupVault.name}\n sourceVolume: ${defaultVolume.id}\nvariables:\n default:\n fn::invoke:\n Function: gcp:compute:getNetwork\n Arguments:\n name:\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nBackup can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}`\n\n* `{{project}}/{{location}}/{{vault_name}}/{{name}}`\n\n* `{{location}}/{{vault_name}}/{{name}}`\n\nWhen using the `pulumi import` command, Backup can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:netapp/backup:Backup default projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backup:Backup default {{project}}/{{location}}/{{vault_name}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backup:Backup default {{location}}/{{vault_name}}/{{name}}\n```\n\n", "properties": { "backupType": { "type": "string", @@ -217054,7 +218634,7 @@ } }, "gcp:netapp/backupPolicy:BackupPolicy": { - "description": "A backup policy is used to schedule backups at regular daily, weekly, or monthly intervals.\nBackup policies allow you to attach a backup schedule to a volume.\nThe policy defines how many backups to retain at daily, weekly, or monthly intervals.\n\n\nTo get more information about backupPolicy, see:\n\n* [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupPolicies)\n* How-to Guides\n * [Documentation](https://cloud.google.com/netapp/volumes/docs/protect-data/about-volume-backups#about_backup_policies)\n\n## Example Usage\n\n### Netapp Backup Policy Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst testBackupPolicyFull = new gcp.netapp.BackupPolicy(\"test_backup_policy_full\", {\n name: \"test-backup-policy-full\",\n location: \"us-central1\",\n dailyBackupLimit: 2,\n weeklyBackupLimit: 1,\n monthlyBackupLimit: 1,\n description: \"TF test backup schedule\",\n enabled: true,\n labels: {\n foo: \"bar\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntest_backup_policy_full = gcp.netapp.BackupPolicy(\"test_backup_policy_full\",\n name=\"test-backup-policy-full\",\n location=\"us-central1\",\n daily_backup_limit=2,\n weekly_backup_limit=1,\n monthly_backup_limit=1,\n description=\"TF test backup schedule\",\n enabled=True,\n labels={\n \"foo\": \"bar\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var testBackupPolicyFull = new Gcp.Netapp.BackupPolicy(\"test_backup_policy_full\", new()\n {\n Name = \"test-backup-policy-full\",\n Location = \"us-central1\",\n DailyBackupLimit = 2,\n WeeklyBackupLimit = 1,\n MonthlyBackupLimit = 1,\n Description = \"TF test backup schedule\",\n Enabled = true,\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := netapp.NewBackupPolicy(ctx, \"test_backup_policy_full\", \u0026netapp.BackupPolicyArgs{\n\t\t\tName: pulumi.String(\"test-backup-policy-full\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDailyBackupLimit: pulumi.Int(2),\n\t\t\tWeeklyBackupLimit: pulumi.Int(1),\n\t\t\tMonthlyBackupLimit: pulumi.Int(1),\n\t\t\tDescription: pulumi.String(\"TF test backup schedule\"),\n\t\t\tEnabled: pulumi.Bool(true),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.netapp.BackupPolicy;\nimport com.pulumi.gcp.netapp.BackupPolicyArgs;\nimport 
java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var testBackupPolicyFull = new BackupPolicy(\"testBackupPolicyFull\", BackupPolicyArgs.builder()\n .name(\"test-backup-policy-full\")\n .location(\"us-central1\")\n .dailyBackupLimit(2)\n .weeklyBackupLimit(1)\n .monthlyBackupLimit(1)\n .description(\"TF test backup schedule\")\n .enabled(true)\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n testBackupPolicyFull:\n type: gcp:netapp:BackupPolicy\n name: test_backup_policy_full\n properties:\n name: test-backup-policy-full\n location: us-central1\n dailyBackupLimit: 2\n weeklyBackupLimit: 1\n monthlyBackupLimit: 1\n description: TF test backup schedule\n enabled: true\n labels:\n foo: bar\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nbackupPolicy can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, backupPolicy can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:netapp/backupPolicy:BackupPolicy default projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backupPolicy:BackupPolicy default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backupPolicy:BackupPolicy default {{location}}/{{name}}\n```\n\n", + "description": "A backup policy is used to schedule backups at regular daily, weekly, or monthly intervals.\nBackup policies allow you to attach a backup schedule to a volume.\nThe policy defines how many backups to retain at daily, weekly, or monthly intervals.\n\n\nTo get more information about BackupPolicy, see:\n\n* [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupPolicies)\n* How-to Guides\n * [Documentation](https://cloud.google.com/netapp/volumes/docs/protect-data/about-volume-backups#about_backup_policies)\n\n## Example Usage\n\n### Netapp Backup Policy Full\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst testBackupPolicyFull = new gcp.netapp.BackupPolicy(\"test_backup_policy_full\", {\n name: \"test-backup-policy-full\",\n location: \"us-central1\",\n dailyBackupLimit: 2,\n weeklyBackupLimit: 1,\n monthlyBackupLimit: 1,\n description: \"TF test backup schedule\",\n enabled: true,\n labels: {\n foo: \"bar\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntest_backup_policy_full = gcp.netapp.BackupPolicy(\"test_backup_policy_full\",\n name=\"test-backup-policy-full\",\n location=\"us-central1\",\n daily_backup_limit=2,\n weekly_backup_limit=1,\n monthly_backup_limit=1,\n description=\"TF test backup schedule\",\n enabled=True,\n labels={\n \"foo\": \"bar\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var testBackupPolicyFull = new Gcp.Netapp.BackupPolicy(\"test_backup_policy_full\", new()\n {\n Name = \"test-backup-policy-full\",\n Location = \"us-central1\",\n DailyBackupLimit = 2,\n 
WeeklyBackupLimit = 1,\n MonthlyBackupLimit = 1,\n Description = \"TF test backup schedule\",\n Enabled = true,\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := netapp.NewBackupPolicy(ctx, \"test_backup_policy_full\", \u0026netapp.BackupPolicyArgs{\n\t\t\tName: pulumi.String(\"test-backup-policy-full\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDailyBackupLimit: pulumi.Int(2),\n\t\t\tWeeklyBackupLimit: pulumi.Int(1),\n\t\t\tMonthlyBackupLimit: pulumi.Int(1),\n\t\t\tDescription: pulumi.String(\"TF test backup schedule\"),\n\t\t\tEnabled: pulumi.Bool(true),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.netapp.BackupPolicy;\nimport com.pulumi.gcp.netapp.BackupPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var testBackupPolicyFull = new BackupPolicy(\"testBackupPolicyFull\", BackupPolicyArgs.builder()\n .name(\"test-backup-policy-full\")\n .location(\"us-central1\")\n .dailyBackupLimit(2)\n .weeklyBackupLimit(1)\n .monthlyBackupLimit(1)\n .description(\"TF test backup schedule\")\n .enabled(true)\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n testBackupPolicyFull:\n type: gcp:netapp:BackupPolicy\n name: test_backup_policy_full\n properties:\n name: test-backup-policy-full\n location: us-central1\n dailyBackupLimit: 2\n weeklyBackupLimit: 1\n monthlyBackupLimit: 1\n description: TF test backup schedule\n enabled: true\n labels:\n foo: bar\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nBackupPolicy can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, BackupPolicy can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:netapp/backupPolicy:BackupPolicy default projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backupPolicy:BackupPolicy default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backupPolicy:BackupPolicy default {{location}}/{{name}}\n```\n\n", "properties": { "assignedVolumeCount": { "type": "integer", @@ -217265,7 +218845,7 @@ } }, "gcp:netapp/backupVault:BackupVault": { - "description": "A backup vault is the location where backups are stored. 
You can only create one backup vault per region.\nA vault can hold multiple backups for multiple volumes in that region.\n\n\nTo get more information about backupVault, see:\n\n* [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults)\n* How-to Guides\n * [Documentation](https://cloud.google.com/netapp/volumes/docs/protect-data/about-volume-backups)\n\n## Example Usage\n\n### Netapp Backup Vault\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst testBackupVault = new gcp.netapp.BackupVault(\"test_backup_vault\", {\n name: \"test-backup-vault\",\n location: \"us-central1\",\n description: \"Terraform created vault\",\n labels: {\n creator: \"testuser\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntest_backup_vault = gcp.netapp.BackupVault(\"test_backup_vault\",\n name=\"test-backup-vault\",\n location=\"us-central1\",\n description=\"Terraform created vault\",\n labels={\n \"creator\": \"testuser\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var testBackupVault = new Gcp.Netapp.BackupVault(\"test_backup_vault\", new()\n {\n Name = \"test-backup-vault\",\n Location = \"us-central1\",\n Description = \"Terraform created vault\",\n Labels = \n {\n { \"creator\", \"testuser\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := netapp.NewBackupVault(ctx, \"test_backup_vault\", \u0026netapp.BackupVaultArgs{\n\t\t\tName: pulumi.String(\"test-backup-vault\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDescription: pulumi.String(\"Terraform created vault\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"creator\": pulumi.String(\"testuser\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.netapp.BackupVault;\nimport com.pulumi.gcp.netapp.BackupVaultArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var testBackupVault = new BackupVault(\"testBackupVault\", BackupVaultArgs.builder()\n .name(\"test-backup-vault\")\n .location(\"us-central1\")\n .description(\"Terraform created vault\")\n .labels(Map.of(\"creator\", \"testuser\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n testBackupVault:\n type: gcp:netapp:BackupVault\n name: test_backup_vault\n properties:\n name: test-backup-vault\n location: us-central1\n description: Terraform created vault\n labels:\n creator: testuser\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nbackupVault can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/backupVaults/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, backupVault can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:netapp/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backupVault:BackupVault default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backupVault:BackupVault default {{location}}/{{name}}\n```\n\n", + "description": "A backup vault is the location where backups are stored. You can only create one backup vault per region.\nA vault can hold multiple backups for multiple volumes in that region.\n\n\nTo get more information about BackupVault, see:\n\n* [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults)\n* How-to Guides\n * [Documentation](https://cloud.google.com/netapp/volumes/docs/protect-data/about-volume-backups)\n\n## Example Usage\n\n### Netapp Backup Vault\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst testBackupVault = new gcp.netapp.BackupVault(\"test_backup_vault\", {\n name: \"test-backup-vault\",\n location: \"us-central1\",\n description: \"Terraform created vault\",\n labels: {\n creator: \"testuser\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntest_backup_vault = gcp.netapp.BackupVault(\"test_backup_vault\",\n name=\"test-backup-vault\",\n location=\"us-central1\",\n description=\"Terraform created vault\",\n labels={\n \"creator\": \"testuser\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var testBackupVault = new Gcp.Netapp.BackupVault(\"test_backup_vault\", new()\n {\n Name = \"test-backup-vault\",\n Location = \"us-central1\",\n Description = \"Terraform created vault\",\n Labels = \n {\n { \"creator\", \"testuser\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := netapp.NewBackupVault(ctx, \"test_backup_vault\", \u0026netapp.BackupVaultArgs{\n\t\t\tName: pulumi.String(\"test-backup-vault\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDescription: pulumi.String(\"Terraform created vault\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"creator\": pulumi.String(\"testuser\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.netapp.BackupVault;\nimport com.pulumi.gcp.netapp.BackupVaultArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var testBackupVault = new BackupVault(\"testBackupVault\", BackupVaultArgs.builder()\n .name(\"test-backup-vault\")\n .location(\"us-central1\")\n .description(\"Terraform created vault\")\n .labels(Map.of(\"creator\", \"testuser\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n testBackupVault:\n type: gcp:netapp:BackupVault\n name: test_backup_vault\n properties:\n name: test-backup-vault\n location: us-central1\n 
description: Terraform created vault\n labels:\n creator: testuser\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nBackupVault can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/backupVaults/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:netapp/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backupVault:BackupVault default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/backupVault:BackupVault default {{location}}/{{name}}\n```\n\n", "properties": { "createTime": { "type": "string", @@ -217575,7 +219155,7 @@ } }, "gcp:netapp/storagePool:StoragePool": { - "description": "## Example Usage\n\n### Storage Pool Create\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\n// Create a network or use datasource to reference existing network\nconst peeringNetwork = new gcp.compute.Network(\"peering_network\", {name: \"test-network\"});\n// Reserve a CIDR for NetApp Volumes to use\n// When using shared-VPCs, this resource needs to be created in host project\nconst privateIpAlloc = new gcp.compute.GlobalAddress(\"private_ip_alloc\", {\n name: \"test-address\",\n purpose: \"VPC_PEERING\",\n addressType: \"INTERNAL\",\n prefixLength: 16,\n network: peeringNetwork.id,\n});\n// Create a Private Service Access connection\n// When using shared-VPCs, this resource needs to be created in host project\nconst _default = new gcp.servicenetworking.Connection(\"default\", {\n network: peeringNetwork.id,\n service: \"netapp.servicenetworking.goog\",\n reservedPeeringRanges: [privateIpAlloc.name],\n});\n// Modify the PSA Connection to allow import/export of custom routes\n// When using shared-VPCs, this resource needs to be created in host project\nconst routeUpdates = new gcp.compute.NetworkPeeringRoutesConfig(\"route_updates\", {\n peering: _default.peering,\n network: peeringNetwork.name,\n importCustomRoutes: true,\n exportCustomRoutes: true,\n});\n// Create a storage pool\n// Create this resource in the project which is expected to own the volumes\nconst testPool = new gcp.netapp.StoragePool(\"test_pool\", {\n name: \"test-pool\",\n location: \"us-central1\",\n serviceLevel: \"PREMIUM\",\n capacityGib: \"2048\",\n network: peeringNetwork.id,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\n# Create a network or use datasource to reference existing network\npeering_network = gcp.compute.Network(\"peering_network\", name=\"test-network\")\n# Reserve a CIDR for NetApp Volumes to use\n# When using shared-VPCs, this resource needs to be created in host project\nprivate_ip_alloc = gcp.compute.GlobalAddress(\"private_ip_alloc\",\n name=\"test-address\",\n purpose=\"VPC_PEERING\",\n address_type=\"INTERNAL\",\n prefix_length=16,\n network=peering_network.id)\n# Create a Private Service Access connection\n# When using shared-VPCs, this resource needs to be created in host project\ndefault = gcp.servicenetworking.Connection(\"default\",\n network=peering_network.id,\n service=\"netapp.servicenetworking.goog\",\n reserved_peering_ranges=[private_ip_alloc.name])\n# Modify the PSA Connection to allow import/export of custom routes\n# When using 
shared-VPCs, this resource needs to be created in host project\nroute_updates = gcp.compute.NetworkPeeringRoutesConfig(\"route_updates\",\n peering=default.peering,\n network=peering_network.name,\n import_custom_routes=True,\n export_custom_routes=True)\n# Create a storage pool\n# Create this resource in the project which is expected to own the volumes\ntest_pool = gcp.netapp.StoragePool(\"test_pool\",\n name=\"test-pool\",\n location=\"us-central1\",\n service_level=\"PREMIUM\",\n capacity_gib=\"2048\",\n network=peering_network.id)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n // Create a network or use datasource to reference existing network\n var peeringNetwork = new Gcp.Compute.Network(\"peering_network\", new()\n {\n Name = \"test-network\",\n });\n\n // Reserve a CIDR for NetApp Volumes to use\n // When using shared-VPCs, this resource needs to be created in host project\n var privateIpAlloc = new Gcp.Compute.GlobalAddress(\"private_ip_alloc\", new()\n {\n Name = \"test-address\",\n Purpose = \"VPC_PEERING\",\n AddressType = \"INTERNAL\",\n PrefixLength = 16,\n Network = peeringNetwork.Id,\n });\n\n // Create a Private Service Access connection\n // When using shared-VPCs, this resource needs to be created in host project\n var @default = new Gcp.ServiceNetworking.Connection(\"default\", new()\n {\n Network = peeringNetwork.Id,\n Service = \"netapp.servicenetworking.goog\",\n ReservedPeeringRanges = new[]\n {\n privateIpAlloc.Name,\n },\n });\n\n // Modify the PSA Connection to allow import/export of custom routes\n // When using shared-VPCs, this resource needs to be created in host project\n var routeUpdates = new Gcp.Compute.NetworkPeeringRoutesConfig(\"route_updates\", new()\n {\n Peering = @default.Peering,\n Network = peeringNetwork.Name,\n ImportCustomRoutes = true,\n ExportCustomRoutes = true,\n });\n\n // Create a storage pool\n // Create this resource in the project which is expected to own the volumes\n var testPool = new Gcp.Netapp.StoragePool(\"test_pool\", new()\n {\n Name = \"test-pool\",\n Location = \"us-central1\",\n ServiceLevel = \"PREMIUM\",\n CapacityGib = \"2048\",\n Network = peeringNetwork.Id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/servicenetworking\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t// Create a network or use datasource to reference existing network\n\t\tpeeringNetwork, err := compute.NewNetwork(ctx, \"peering_network\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"test-network\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Reserve a CIDR for NetApp Volumes to use\n\t\t// When using shared-VPCs, this resource needs to be created in host project\n\t\tprivateIpAlloc, err := compute.NewGlobalAddress(ctx, \"private_ip_alloc\", \u0026compute.GlobalAddressArgs{\n\t\t\tName: pulumi.String(\"test-address\"),\n\t\t\tPurpose: pulumi.String(\"VPC_PEERING\"),\n\t\t\tAddressType: pulumi.String(\"INTERNAL\"),\n\t\t\tPrefixLength: pulumi.Int(16),\n\t\t\tNetwork: peeringNetwork.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Create a Private Service Access connection\n\t\t// When using shared-VPCs, this resource needs to be created in host 
project\n\t\t_, err = servicenetworking.NewConnection(ctx, \"default\", \u0026servicenetworking.ConnectionArgs{\n\t\t\tNetwork: peeringNetwork.ID(),\n\t\t\tService: pulumi.String(\"netapp.servicenetworking.goog\"),\n\t\t\tReservedPeeringRanges: pulumi.StringArray{\n\t\t\t\tprivateIpAlloc.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Modify the PSA Connection to allow import/export of custom routes\n\t\t// When using shared-VPCs, this resource needs to be created in host project\n\t\t_, err = compute.NewNetworkPeeringRoutesConfig(ctx, \"route_updates\", \u0026compute.NetworkPeeringRoutesConfigArgs{\n\t\t\tPeering: _default.Peering,\n\t\t\tNetwork: peeringNetwork.Name,\n\t\t\tImportCustomRoutes: pulumi.Bool(true),\n\t\t\tExportCustomRoutes: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Create a storage pool\n\t\t// Create this resource in the project which is expected to own the volumes\n\t\t_, err = netapp.NewStoragePool(ctx, \"test_pool\", \u0026netapp.StoragePoolArgs{\n\t\t\tName: pulumi.String(\"test-pool\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tServiceLevel: pulumi.String(\"PREMIUM\"),\n\t\t\tCapacityGib: pulumi.String(\"2048\"),\n\t\t\tNetwork: peeringNetwork.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.GlobalAddress;\nimport com.pulumi.gcp.compute.GlobalAddressArgs;\nimport com.pulumi.gcp.servicenetworking.Connection;\nimport com.pulumi.gcp.servicenetworking.ConnectionArgs;\nimport com.pulumi.gcp.compute.NetworkPeeringRoutesConfig;\nimport com.pulumi.gcp.compute.NetworkPeeringRoutesConfigArgs;\nimport com.pulumi.gcp.netapp.StoragePool;\nimport com.pulumi.gcp.netapp.StoragePoolArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n // Create a network or use datasource to reference existing network\n var peeringNetwork = new Network(\"peeringNetwork\", NetworkArgs.builder()\n .name(\"test-network\")\n .build());\n\n // Reserve a CIDR for NetApp Volumes to use\n // When using shared-VPCs, this resource needs to be created in host project\n var privateIpAlloc = new GlobalAddress(\"privateIpAlloc\", GlobalAddressArgs.builder()\n .name(\"test-address\")\n .purpose(\"VPC_PEERING\")\n .addressType(\"INTERNAL\")\n .prefixLength(16)\n .network(peeringNetwork.id())\n .build());\n\n // Create a Private Service Access connection\n // When using shared-VPCs, this resource needs to be created in host project\n var default_ = new Connection(\"default\", ConnectionArgs.builder()\n .network(peeringNetwork.id())\n .service(\"netapp.servicenetworking.goog\")\n .reservedPeeringRanges(privateIpAlloc.name())\n .build());\n\n // Modify the PSA Connection to allow import/export of custom routes\n // When using shared-VPCs, this resource needs to be created in host project\n var routeUpdates = new NetworkPeeringRoutesConfig(\"routeUpdates\", NetworkPeeringRoutesConfigArgs.builder()\n .peering(default_.peering())\n .network(peeringNetwork.name())\n .importCustomRoutes(true)\n .exportCustomRoutes(true)\n 
.build());\n\n // Create a storage pool\n // Create this resource in the project which is expected to own the volumes\n var testPool = new StoragePool(\"testPool\", StoragePoolArgs.builder()\n .name(\"test-pool\")\n .location(\"us-central1\")\n .serviceLevel(\"PREMIUM\")\n .capacityGib(\"2048\")\n .network(peeringNetwork.id())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # Create a network or use datasource to reference existing network\n peeringNetwork:\n type: gcp:compute:Network\n name: peering_network\n properties:\n name: test-network\n # Reserve a CIDR for NetApp Volumes to use\n # When using shared-VPCs, this resource needs to be created in host project\n privateIpAlloc:\n type: gcp:compute:GlobalAddress\n name: private_ip_alloc\n properties:\n name: test-address\n purpose: VPC_PEERING\n addressType: INTERNAL\n prefixLength: 16\n network: ${peeringNetwork.id}\n # Create a Private Service Access connection\n # When using shared-VPCs, this resource needs to be created in host project\n default:\n type: gcp:servicenetworking:Connection\n properties:\n network: ${peeringNetwork.id}\n service: netapp.servicenetworking.goog\n reservedPeeringRanges:\n - ${privateIpAlloc.name}\n # Modify the PSA Connection to allow import/export of custom routes\n # When using shared-VPCs, this resource needs to be created in host project\n routeUpdates:\n type: gcp:compute:NetworkPeeringRoutesConfig\n name: route_updates\n properties:\n peering: ${default.peering}\n network: ${peeringNetwork.name}\n importCustomRoutes: true\n exportCustomRoutes: true\n # Create a storage pool\n # Create this resource in the project which is expected to own the volumes\n testPool:\n type: gcp:netapp:StoragePool\n name: test_pool\n properties:\n name: test-pool\n location: us-central1\n serviceLevel: PREMIUM\n capacityGib: '2048'\n network: ${peeringNetwork.id}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nstoragePool can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/storagePools/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, storagePool can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:netapp/storagePool:StoragePool default projects/{{project}}/locations/{{location}}/storagePools/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/storagePool:StoragePool default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/storagePool:StoragePool default {{location}}/{{name}}\n```\n\n", + "description": "## Example Usage\n\n### Storage Pool Create\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\n// Create a network or use datasource to reference existing network\nconst peeringNetwork = new gcp.compute.Network(\"peering_network\", {name: \"test-network\"});\n// Reserve a CIDR for NetApp Volumes to use\n// When using shared-VPCs, this resource needs to be created in host project\nconst privateIpAlloc = new gcp.compute.GlobalAddress(\"private_ip_alloc\", {\n name: \"test-address\",\n purpose: \"VPC_PEERING\",\n addressType: \"INTERNAL\",\n prefixLength: 16,\n network: peeringNetwork.id,\n});\n// Create a Private Service Access connection\n// When using shared-VPCs, this resource needs to be created in host project\nconst _default = new gcp.servicenetworking.Connection(\"default\", {\n network: peeringNetwork.id,\n service: \"netapp.servicenetworking.goog\",\n reservedPeeringRanges: [privateIpAlloc.name],\n});\n// Modify the PSA Connection to allow import/export of custom routes\n// When using shared-VPCs, this resource needs to be created in host project\nconst routeUpdates = new gcp.compute.NetworkPeeringRoutesConfig(\"route_updates\", {\n peering: _default.peering,\n network: peeringNetwork.name,\n importCustomRoutes: true,\n exportCustomRoutes: true,\n});\n// Create a storage pool\n// Create this resource in the project which is expected to own the volumes\nconst testPool = new gcp.netapp.StoragePool(\"test_pool\", {\n name: \"test-pool\",\n location: \"us-central1\",\n serviceLevel: \"PREMIUM\",\n capacityGib: \"2048\",\n network: peeringNetwork.id,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\n# Create a network or use datasource to reference existing network\npeering_network = gcp.compute.Network(\"peering_network\", name=\"test-network\")\n# Reserve a CIDR for NetApp Volumes to use\n# When using shared-VPCs, this resource needs to be created in host project\nprivate_ip_alloc = gcp.compute.GlobalAddress(\"private_ip_alloc\",\n name=\"test-address\",\n purpose=\"VPC_PEERING\",\n address_type=\"INTERNAL\",\n prefix_length=16,\n network=peering_network.id)\n# Create a Private Service Access connection\n# When using shared-VPCs, this resource needs to be created in host project\ndefault = gcp.servicenetworking.Connection(\"default\",\n network=peering_network.id,\n service=\"netapp.servicenetworking.goog\",\n reserved_peering_ranges=[private_ip_alloc.name])\n# Modify the PSA Connection to allow import/export of custom routes\n# When using shared-VPCs, this resource needs to be created in host project\nroute_updates = gcp.compute.NetworkPeeringRoutesConfig(\"route_updates\",\n peering=default.peering,\n network=peering_network.name,\n import_custom_routes=True,\n export_custom_routes=True)\n# Create a storage pool\n# Create this resource in the project which is expected to own the volumes\ntest_pool = gcp.netapp.StoragePool(\"test_pool\",\n name=\"test-pool\",\n location=\"us-central1\",\n service_level=\"PREMIUM\",\n capacity_gib=\"2048\",\n network=peering_network.id)\n```\n```csharp\nusing 
System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n // Create a network or use datasource to reference existing network\n var peeringNetwork = new Gcp.Compute.Network(\"peering_network\", new()\n {\n Name = \"test-network\",\n });\n\n // Reserve a CIDR for NetApp Volumes to use\n // When using shared-VPCs, this resource needs to be created in host project\n var privateIpAlloc = new Gcp.Compute.GlobalAddress(\"private_ip_alloc\", new()\n {\n Name = \"test-address\",\n Purpose = \"VPC_PEERING\",\n AddressType = \"INTERNAL\",\n PrefixLength = 16,\n Network = peeringNetwork.Id,\n });\n\n // Create a Private Service Access connection\n // When using shared-VPCs, this resource needs to be created in host project\n var @default = new Gcp.ServiceNetworking.Connection(\"default\", new()\n {\n Network = peeringNetwork.Id,\n Service = \"netapp.servicenetworking.goog\",\n ReservedPeeringRanges = new[]\n {\n privateIpAlloc.Name,\n },\n });\n\n // Modify the PSA Connection to allow import/export of custom routes\n // When using shared-VPCs, this resource needs to be created in host project\n var routeUpdates = new Gcp.Compute.NetworkPeeringRoutesConfig(\"route_updates\", new()\n {\n Peering = @default.Peering,\n Network = peeringNetwork.Name,\n ImportCustomRoutes = true,\n ExportCustomRoutes = true,\n });\n\n // Create a storage pool\n // Create this resource in the project which is expected to own the volumes\n var testPool = new Gcp.Netapp.StoragePool(\"test_pool\", new()\n {\n Name = \"test-pool\",\n Location = \"us-central1\",\n ServiceLevel = \"PREMIUM\",\n CapacityGib = \"2048\",\n Network = peeringNetwork.Id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/netapp\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/servicenetworking\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t// Create a network or use datasource to reference existing network\n\t\tpeeringNetwork, err := compute.NewNetwork(ctx, \"peering_network\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"test-network\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Reserve a CIDR for NetApp Volumes to use\n\t\t// When using shared-VPCs, this resource needs to be created in host project\n\t\tprivateIpAlloc, err := compute.NewGlobalAddress(ctx, \"private_ip_alloc\", \u0026compute.GlobalAddressArgs{\n\t\t\tName: pulumi.String(\"test-address\"),\n\t\t\tPurpose: pulumi.String(\"VPC_PEERING\"),\n\t\t\tAddressType: pulumi.String(\"INTERNAL\"),\n\t\t\tPrefixLength: pulumi.Int(16),\n\t\t\tNetwork: peeringNetwork.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Create a Private Service Access connection\n\t\t// When using shared-VPCs, this resource needs to be created in host project\n\t\t_, err = servicenetworking.NewConnection(ctx, \"default\", \u0026servicenetworking.ConnectionArgs{\n\t\t\tNetwork: peeringNetwork.ID(),\n\t\t\tService: pulumi.String(\"netapp.servicenetworking.goog\"),\n\t\t\tReservedPeeringRanges: pulumi.StringArray{\n\t\t\t\tprivateIpAlloc.Name,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Modify the PSA Connection to allow import/export of custom routes\n\t\t// When using shared-VPCs, this resource needs to be created in host project\n\t\t_, err = compute.NewNetworkPeeringRoutesConfig(ctx, 
\"route_updates\", \u0026compute.NetworkPeeringRoutesConfigArgs{\n\t\t\tPeering: _default.Peering,\n\t\t\tNetwork: peeringNetwork.Name,\n\t\t\tImportCustomRoutes: pulumi.Bool(true),\n\t\t\tExportCustomRoutes: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Create a storage pool\n\t\t// Create this resource in the project which is expected to own the volumes\n\t\t_, err = netapp.NewStoragePool(ctx, \"test_pool\", \u0026netapp.StoragePoolArgs{\n\t\t\tName: pulumi.String(\"test-pool\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tServiceLevel: pulumi.String(\"PREMIUM\"),\n\t\t\tCapacityGib: pulumi.String(\"2048\"),\n\t\t\tNetwork: peeringNetwork.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.GlobalAddress;\nimport com.pulumi.gcp.compute.GlobalAddressArgs;\nimport com.pulumi.gcp.servicenetworking.Connection;\nimport com.pulumi.gcp.servicenetworking.ConnectionArgs;\nimport com.pulumi.gcp.compute.NetworkPeeringRoutesConfig;\nimport com.pulumi.gcp.compute.NetworkPeeringRoutesConfigArgs;\nimport com.pulumi.gcp.netapp.StoragePool;\nimport com.pulumi.gcp.netapp.StoragePoolArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n // Create a network or use datasource to reference existing network\n var peeringNetwork = new Network(\"peeringNetwork\", NetworkArgs.builder()\n .name(\"test-network\")\n .build());\n\n // Reserve a CIDR for NetApp Volumes to use\n // When using shared-VPCs, this resource needs to be created in host project\n var privateIpAlloc = new GlobalAddress(\"privateIpAlloc\", GlobalAddressArgs.builder()\n .name(\"test-address\")\n .purpose(\"VPC_PEERING\")\n .addressType(\"INTERNAL\")\n .prefixLength(16)\n .network(peeringNetwork.id())\n .build());\n\n // Create a Private Service Access connection\n // When using shared-VPCs, this resource needs to be created in host project\n var default_ = new Connection(\"default\", ConnectionArgs.builder()\n .network(peeringNetwork.id())\n .service(\"netapp.servicenetworking.goog\")\n .reservedPeeringRanges(privateIpAlloc.name())\n .build());\n\n // Modify the PSA Connection to allow import/export of custom routes\n // When using shared-VPCs, this resource needs to be created in host project\n var routeUpdates = new NetworkPeeringRoutesConfig(\"routeUpdates\", NetworkPeeringRoutesConfigArgs.builder()\n .peering(default_.peering())\n .network(peeringNetwork.name())\n .importCustomRoutes(true)\n .exportCustomRoutes(true)\n .build());\n\n // Create a storage pool\n // Create this resource in the project which is expected to own the volumes\n var testPool = new StoragePool(\"testPool\", StoragePoolArgs.builder()\n .name(\"test-pool\")\n .location(\"us-central1\")\n .serviceLevel(\"PREMIUM\")\n .capacityGib(\"2048\")\n .network(peeringNetwork.id())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n # Create a network or use datasource to reference existing network\n peeringNetwork:\n type: gcp:compute:Network\n name: peering_network\n properties:\n name: test-network\n # Reserve a CIDR 
for NetApp Volumes to use\n # When using shared-VPCs, this resource needs to be created in host project\n privateIpAlloc:\n type: gcp:compute:GlobalAddress\n name: private_ip_alloc\n properties:\n name: test-address\n purpose: VPC_PEERING\n addressType: INTERNAL\n prefixLength: 16\n network: ${peeringNetwork.id}\n # Create a Private Service Access connection\n # When using shared-VPCs, this resource needs to be created in host project\n default:\n type: gcp:servicenetworking:Connection\n properties:\n network: ${peeringNetwork.id}\n service: netapp.servicenetworking.goog\n reservedPeeringRanges:\n - ${privateIpAlloc.name}\n # Modify the PSA Connection to allow import/export of custom routes\n # When using shared-VPCs, this resource needs to be created in host project\n routeUpdates:\n type: gcp:compute:NetworkPeeringRoutesConfig\n name: route_updates\n properties:\n peering: ${default.peering}\n network: ${peeringNetwork.name}\n importCustomRoutes: true\n exportCustomRoutes: true\n # Create a storage pool\n # Create this resource in the project which is expected to own the volumes\n testPool:\n type: gcp:netapp:StoragePool\n name: test_pool\n properties:\n name: test-pool\n location: us-central1\n serviceLevel: PREMIUM\n capacityGib: '2048'\n network: ${peeringNetwork.id}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nStoragePool can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/storagePools/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, StoragePool can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:netapp/storagePool:StoragePool default projects/{{project}}/locations/{{location}}/storagePools/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/storagePool:StoragePool default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:netapp/storagePool:StoragePool default {{location}}/{{name}}\n```\n\n", "properties": { "activeDirectory": { "type": "string", @@ -217862,7 +219442,7 @@ }, "deletionPolicy": { "type": "string", - "description": "Policy to determine if the volume should be deleted forcefully.\nVolumes may have nested snapshot resources. Deleting such a volume will fail.\nSetting this parameter to FORCE will delete volumes including nested snapshots.\n" + "description": "Policy to determine if the volume should be deleted forcefully.\nVolumes may have nested snapshot resources. Deleting such a volume will fail.\nSetting this parameter to FORCE will delete volumes including nested snapshots.\nPossible values: DEFAULT, FORCE.\n" }, "description": { "type": "string", @@ -218056,7 +219636,7 @@ }, "deletionPolicy": { "type": "string", - "description": "Policy to determine if the volume should be deleted forcefully.\nVolumes may have nested snapshot resources. Deleting such a volume will fail.\nSetting this parameter to FORCE will delete volumes including nested snapshots.\n" + "description": "Policy to determine if the volume should be deleted forcefully.\nVolumes may have nested snapshot resources. Deleting such a volume will fail.\nSetting this parameter to FORCE will delete volumes including nested snapshots.\nPossible values: DEFAULT, FORCE.\n" }, "description": { "type": "string", @@ -218175,7 +219755,7 @@ }, "deletionPolicy": { "type": "string", - "description": "Policy to determine if the volume should be deleted forcefully.\nVolumes may have nested snapshot resources. 
Deleting such a volume will fail.\nSetting this parameter to FORCE will delete volumes including nested snapshots.\n" + "description": "Policy to determine if the volume should be deleted forcefully.\nVolumes may have nested snapshot resources. Deleting such a volume will fail.\nSetting this parameter to FORCE will delete volumes including nested snapshots.\nPossible values: DEFAULT, FORCE.\n" }, "description": { "type": "string", @@ -219931,7 +221511,7 @@ } }, "gcp:networkconnectivity/spoke:Spoke": { - "description": "The NetworkConnectivity Spoke resource\n\n\nTo get more information about Spoke, see:\n\n* [API documentation](https://cloud.google.com/network-connectivity/docs/reference/networkconnectivity/rest/v1beta/projects.locations.spokes)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/network-connectivity/docs/network-connectivity-center/concepts/overview)\n\n## Example Usage\n\n### Network Connectivity Spoke Linked Vpc Network Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst network = new gcp.compute.Network(\"network\", {\n name: \"net\",\n autoCreateSubnetworks: false,\n});\nconst basicHub = new gcp.networkconnectivity.Hub(\"basic_hub\", {\n name: \"hub1\",\n description: \"A sample hub\",\n labels: {\n \"label-two\": \"value-one\",\n },\n});\nconst primary = new gcp.networkconnectivity.Spoke(\"primary\", {\n name: \"spoke1\",\n location: \"global\",\n description: \"A sample spoke with a linked router appliance instance\",\n labels: {\n \"label-one\": \"value-one\",\n },\n hub: basicHub.id,\n linkedVpcNetwork: {\n excludeExportRanges: [\n \"198.51.100.0/24\",\n \"10.10.0.0/16\",\n ],\n uri: network.selfLink,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nnetwork = gcp.compute.Network(\"network\",\n name=\"net\",\n auto_create_subnetworks=False)\nbasic_hub = gcp.networkconnectivity.Hub(\"basic_hub\",\n name=\"hub1\",\n description=\"A sample hub\",\n labels={\n \"label-two\": \"value-one\",\n })\nprimary = gcp.networkconnectivity.Spoke(\"primary\",\n name=\"spoke1\",\n location=\"global\",\n description=\"A sample spoke with a linked router appliance instance\",\n labels={\n \"label-one\": \"value-one\",\n },\n hub=basic_hub.id,\n linked_vpc_network={\n \"exclude_export_ranges\": [\n \"198.51.100.0/24\",\n \"10.10.0.0/16\",\n ],\n \"uri\": network.self_link,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var network = new Gcp.Compute.Network(\"network\", new()\n {\n Name = \"net\",\n AutoCreateSubnetworks = false,\n });\n\n var basicHub = new Gcp.NetworkConnectivity.Hub(\"basic_hub\", new()\n {\n Name = \"hub1\",\n Description = \"A sample hub\",\n Labels = \n {\n { \"label-two\", \"value-one\" },\n },\n });\n\n var primary = new Gcp.NetworkConnectivity.Spoke(\"primary\", new()\n {\n Name = \"spoke1\",\n Location = \"global\",\n Description = \"A sample spoke with a linked router appliance instance\",\n Labels = \n {\n { \"label-one\", \"value-one\" },\n },\n Hub = basicHub.Id,\n LinkedVpcNetwork = new Gcp.NetworkConnectivity.Inputs.SpokeLinkedVpcNetworkArgs\n {\n ExcludeExportRanges = new[]\n {\n \"198.51.100.0/24\",\n \"10.10.0.0/16\",\n },\n Uri = network.SelfLink,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkconnectivity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tnetwork, err := compute.NewNetwork(ctx, \"network\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"net\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbasicHub, err := networkconnectivity.NewHub(ctx, \"basic_hub\", \u0026networkconnectivity.HubArgs{\n\t\t\tName: pulumi.String(\"hub1\"),\n\t\t\tDescription: pulumi.String(\"A sample hub\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-two\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networkconnectivity.NewSpoke(ctx, \"primary\", \u0026networkconnectivity.SpokeArgs{\n\t\t\tName: pulumi.String(\"spoke1\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tDescription: pulumi.String(\"A sample spoke with a linked router appliance instance\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-one\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t\tHub: basicHub.ID(),\n\t\t\tLinkedVpcNetwork: \u0026networkconnectivity.SpokeLinkedVpcNetworkArgs{\n\t\t\t\tExcludeExportRanges: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"198.51.100.0/24\"),\n\t\t\t\t\tpulumi.String(\"10.10.0.0/16\"),\n\t\t\t\t},\n\t\t\t\tUri: network.SelfLink,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.networkconnectivity.Hub;\nimport com.pulumi.gcp.networkconnectivity.HubArgs;\nimport com.pulumi.gcp.networkconnectivity.Spoke;\nimport com.pulumi.gcp.networkconnectivity.SpokeArgs;\nimport com.pulumi.gcp.networkconnectivity.inputs.SpokeLinkedVpcNetworkArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var network = new Network(\"network\", NetworkArgs.builder()\n .name(\"net\")\n .autoCreateSubnetworks(false)\n .build());\n\n var basicHub = new Hub(\"basicHub\", HubArgs.builder()\n .name(\"hub1\")\n .description(\"A sample hub\")\n .labels(Map.of(\"label-two\", \"value-one\"))\n .build());\n\n var primary = new Spoke(\"primary\", SpokeArgs.builder()\n .name(\"spoke1\")\n .location(\"global\")\n .description(\"A sample spoke with a linked router appliance instance\")\n .labels(Map.of(\"label-one\", \"value-one\"))\n .hub(basicHub.id())\n .linkedVpcNetwork(SpokeLinkedVpcNetworkArgs.builder()\n .excludeExportRanges( \n \"198.51.100.0/24\",\n \"10.10.0.0/16\")\n .uri(network.selfLink())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n network:\n type: gcp:compute:Network\n properties:\n name: net\n autoCreateSubnetworks: false\n basicHub:\n type: gcp:networkconnectivity:Hub\n name: basic_hub\n properties:\n name: hub1\n description: A sample hub\n labels:\n label-two: value-one\n primary:\n type: gcp:networkconnectivity:Spoke\n properties:\n name: spoke1\n location: global\n description: A sample spoke with a linked router appliance instance\n labels:\n 
label-one: value-one\n hub: ${basicHub.id}\n linkedVpcNetwork:\n excludeExportRanges:\n - 198.51.100.0/24\n - 10.10.0.0/16\n uri: ${network.selfLink}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Connectivity Spoke Router Appliance Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst network = new gcp.compute.Network(\"network\", {\n name: \"tf-test-network_2067\",\n autoCreateSubnetworks: false,\n});\nconst subnetwork = new gcp.compute.Subnetwork(\"subnetwork\", {\n name: \"tf-test-subnet_40785\",\n ipCidrRange: \"10.0.0.0/28\",\n region: \"us-central1\",\n network: network.selfLink,\n});\nconst instance = new gcp.compute.Instance(\"instance\", {\n name: \"tf-test-instance_79169\",\n machineType: \"e2-medium\",\n canIpForward: true,\n zone: \"us-central1-a\",\n bootDisk: {\n initializeParams: {\n image: \"projects/debian-cloud/global/images/debian-10-buster-v20210817\",\n },\n },\n networkInterfaces: [{\n subnetwork: subnetwork.name,\n networkIp: \"10.0.0.2\",\n accessConfigs: [{\n networkTier: \"PREMIUM\",\n }],\n }],\n});\nconst basicHub = new gcp.networkconnectivity.Hub(\"basic_hub\", {\n name: \"tf-test-hub_56529\",\n description: \"A sample hub\",\n labels: {\n \"label-two\": \"value-one\",\n },\n});\nconst primary = new gcp.networkconnectivity.Spoke(\"primary\", {\n name: \"tf-test-name_75413\",\n location: \"us-central1\",\n description: \"A sample spoke with a linked routher appliance instance\",\n labels: {\n \"label-one\": \"value-one\",\n },\n hub: basicHub.id,\n linkedRouterApplianceInstances: {\n instances: [{\n virtualMachine: instance.selfLink,\n ipAddress: \"10.0.0.2\",\n }],\n siteToSiteDataTransfer: true,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nnetwork = gcp.compute.Network(\"network\",\n name=\"tf-test-network_2067\",\n auto_create_subnetworks=False)\nsubnetwork = gcp.compute.Subnetwork(\"subnetwork\",\n name=\"tf-test-subnet_40785\",\n ip_cidr_range=\"10.0.0.0/28\",\n region=\"us-central1\",\n network=network.self_link)\ninstance = gcp.compute.Instance(\"instance\",\n name=\"tf-test-instance_79169\",\n machine_type=\"e2-medium\",\n can_ip_forward=True,\n zone=\"us-central1-a\",\n boot_disk={\n \"initialize_params\": {\n \"image\": \"projects/debian-cloud/global/images/debian-10-buster-v20210817\",\n },\n },\n network_interfaces=[{\n \"subnetwork\": subnetwork.name,\n \"network_ip\": \"10.0.0.2\",\n \"access_configs\": [{\n \"network_tier\": \"PREMIUM\",\n }],\n }])\nbasic_hub = gcp.networkconnectivity.Hub(\"basic_hub\",\n name=\"tf-test-hub_56529\",\n description=\"A sample hub\",\n labels={\n \"label-two\": \"value-one\",\n })\nprimary = gcp.networkconnectivity.Spoke(\"primary\",\n name=\"tf-test-name_75413\",\n location=\"us-central1\",\n description=\"A sample spoke with a linked routher appliance instance\",\n labels={\n \"label-one\": \"value-one\",\n },\n hub=basic_hub.id,\n linked_router_appliance_instances={\n \"instances\": [{\n \"virtual_machine\": instance.self_link,\n \"ip_address\": \"10.0.0.2\",\n }],\n \"site_to_site_data_transfer\": True,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var network = new Gcp.Compute.Network(\"network\", new()\n {\n Name = \"tf-test-network_2067\",\n AutoCreateSubnetworks = false,\n });\n\n var subnetwork = new Gcp.Compute.Subnetwork(\"subnetwork\", 
new()\n {\n Name = \"tf-test-subnet_40785\",\n IpCidrRange = \"10.0.0.0/28\",\n Region = \"us-central1\",\n Network = network.SelfLink,\n });\n\n var instance = new Gcp.Compute.Instance(\"instance\", new()\n {\n Name = \"tf-test-instance_79169\",\n MachineType = \"e2-medium\",\n CanIpForward = true,\n Zone = \"us-central1-a\",\n BootDisk = new Gcp.Compute.Inputs.InstanceBootDiskArgs\n {\n InitializeParams = new Gcp.Compute.Inputs.InstanceBootDiskInitializeParamsArgs\n {\n Image = \"projects/debian-cloud/global/images/debian-10-buster-v20210817\",\n },\n },\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceNetworkInterfaceArgs\n {\n Subnetwork = subnetwork.Name,\n NetworkIp = \"10.0.0.2\",\n AccessConfigs = new[]\n {\n new Gcp.Compute.Inputs.InstanceNetworkInterfaceAccessConfigArgs\n {\n NetworkTier = \"PREMIUM\",\n },\n },\n },\n },\n });\n\n var basicHub = new Gcp.NetworkConnectivity.Hub(\"basic_hub\", new()\n {\n Name = \"tf-test-hub_56529\",\n Description = \"A sample hub\",\n Labels = \n {\n { \"label-two\", \"value-one\" },\n },\n });\n\n var primary = new Gcp.NetworkConnectivity.Spoke(\"primary\", new()\n {\n Name = \"tf-test-name_75413\",\n Location = \"us-central1\",\n Description = \"A sample spoke with a linked routher appliance instance\",\n Labels = \n {\n { \"label-one\", \"value-one\" },\n },\n Hub = basicHub.Id,\n LinkedRouterApplianceInstances = new Gcp.NetworkConnectivity.Inputs.SpokeLinkedRouterApplianceInstancesArgs\n {\n Instances = new[]\n {\n new Gcp.NetworkConnectivity.Inputs.SpokeLinkedRouterApplianceInstancesInstanceArgs\n {\n VirtualMachine = instance.SelfLink,\n IpAddress = \"10.0.0.2\",\n },\n },\n SiteToSiteDataTransfer = true,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkconnectivity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tnetwork, err := compute.NewNetwork(ctx, \"network\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"tf-test-network_2067\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsubnetwork, err := compute.NewSubnetwork(ctx, \"subnetwork\", \u0026compute.SubnetworkArgs{\n\t\t\tName: pulumi.String(\"tf-test-subnet_40785\"),\n\t\t\tIpCidrRange: pulumi.String(\"10.0.0.0/28\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNetwork: network.SelfLink,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := compute.NewInstance(ctx, \"instance\", \u0026compute.InstanceArgs{\n\t\t\tName: pulumi.String(\"tf-test-instance_79169\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tCanIpForward: pulumi.Bool(true),\n\t\t\tZone: pulumi.String(\"us-central1-a\"),\n\t\t\tBootDisk: \u0026compute.InstanceBootDiskArgs{\n\t\t\t\tInitializeParams: \u0026compute.InstanceBootDiskInitializeParamsArgs{\n\t\t\t\t\tImage: pulumi.String(\"projects/debian-cloud/global/images/debian-10-buster-v20210817\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tNetworkInterfaces: compute.InstanceNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceNetworkInterfaceArgs{\n\t\t\t\t\tSubnetwork: subnetwork.Name,\n\t\t\t\t\tNetworkIp: pulumi.String(\"10.0.0.2\"),\n\t\t\t\t\tAccessConfigs: compute.InstanceNetworkInterfaceAccessConfigArray{\n\t\t\t\t\t\t\u0026compute.InstanceNetworkInterfaceAccessConfigArgs{\n\t\t\t\t\t\t\tNetworkTier: 
pulumi.String(\"PREMIUM\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbasicHub, err := networkconnectivity.NewHub(ctx, \"basic_hub\", \u0026networkconnectivity.HubArgs{\n\t\t\tName: pulumi.String(\"tf-test-hub_56529\"),\n\t\t\tDescription: pulumi.String(\"A sample hub\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-two\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networkconnectivity.NewSpoke(ctx, \"primary\", \u0026networkconnectivity.SpokeArgs{\n\t\t\tName: pulumi.String(\"tf-test-name_75413\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDescription: pulumi.String(\"A sample spoke with a linked routher appliance instance\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-one\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t\tHub: basicHub.ID(),\n\t\t\tLinkedRouterApplianceInstances: \u0026networkconnectivity.SpokeLinkedRouterApplianceInstancesArgs{\n\t\t\t\tInstances: networkconnectivity.SpokeLinkedRouterApplianceInstancesInstanceArray{\n\t\t\t\t\t\u0026networkconnectivity.SpokeLinkedRouterApplianceInstancesInstanceArgs{\n\t\t\t\t\t\tVirtualMachine: instance.SelfLink,\n\t\t\t\t\t\tIpAddress: pulumi.String(\"10.0.0.2\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSiteToSiteDataTransfer: pulumi.Bool(true),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.Subnetwork;\nimport com.pulumi.gcp.compute.SubnetworkArgs;\nimport com.pulumi.gcp.compute.Instance;\nimport com.pulumi.gcp.compute.InstanceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskInitializeParamsArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceNetworkInterfaceArgs;\nimport com.pulumi.gcp.networkconnectivity.Hub;\nimport com.pulumi.gcp.networkconnectivity.HubArgs;\nimport com.pulumi.gcp.networkconnectivity.Spoke;\nimport com.pulumi.gcp.networkconnectivity.SpokeArgs;\nimport com.pulumi.gcp.networkconnectivity.inputs.SpokeLinkedRouterApplianceInstancesArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var network = new Network(\"network\", NetworkArgs.builder()\n .name(\"tf-test-network_2067\")\n .autoCreateSubnetworks(false)\n .build());\n\n var subnetwork = new Subnetwork(\"subnetwork\", SubnetworkArgs.builder()\n .name(\"tf-test-subnet_40785\")\n .ipCidrRange(\"10.0.0.0/28\")\n .region(\"us-central1\")\n .network(network.selfLink())\n .build());\n\n var instance = new Instance(\"instance\", InstanceArgs.builder()\n .name(\"tf-test-instance_79169\")\n .machineType(\"e2-medium\")\n .canIpForward(true)\n .zone(\"us-central1-a\")\n .bootDisk(InstanceBootDiskArgs.builder()\n .initializeParams(InstanceBootDiskInitializeParamsArgs.builder()\n .image(\"projects/debian-cloud/global/images/debian-10-buster-v20210817\")\n .build())\n .build())\n .networkInterfaces(InstanceNetworkInterfaceArgs.builder()\n .subnetwork(subnetwork.name())\n .networkIp(\"10.0.0.2\")\n 
.accessConfigs(InstanceNetworkInterfaceAccessConfigArgs.builder()\n .networkTier(\"PREMIUM\")\n .build())\n .build())\n .build());\n\n var basicHub = new Hub(\"basicHub\", HubArgs.builder()\n .name(\"tf-test-hub_56529\")\n .description(\"A sample hub\")\n .labels(Map.of(\"label-two\", \"value-one\"))\n .build());\n\n var primary = new Spoke(\"primary\", SpokeArgs.builder()\n .name(\"tf-test-name_75413\")\n .location(\"us-central1\")\n .description(\"A sample spoke with a linked routher appliance instance\")\n .labels(Map.of(\"label-one\", \"value-one\"))\n .hub(basicHub.id())\n .linkedRouterApplianceInstances(SpokeLinkedRouterApplianceInstancesArgs.builder()\n .instances(SpokeLinkedRouterApplianceInstancesInstanceArgs.builder()\n .virtualMachine(instance.selfLink())\n .ipAddress(\"10.0.0.2\")\n .build())\n .siteToSiteDataTransfer(true)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n network:\n type: gcp:compute:Network\n properties:\n name: tf-test-network_2067\n autoCreateSubnetworks: false\n subnetwork:\n type: gcp:compute:Subnetwork\n properties:\n name: tf-test-subnet_40785\n ipCidrRange: 10.0.0.0/28\n region: us-central1\n network: ${network.selfLink}\n instance:\n type: gcp:compute:Instance\n properties:\n name: tf-test-instance_79169\n machineType: e2-medium\n canIpForward: true\n zone: us-central1-a\n bootDisk:\n initializeParams:\n image: projects/debian-cloud/global/images/debian-10-buster-v20210817\n networkInterfaces:\n - subnetwork: ${subnetwork.name}\n networkIp: 10.0.0.2\n accessConfigs:\n - networkTier: PREMIUM\n basicHub:\n type: gcp:networkconnectivity:Hub\n name: basic_hub\n properties:\n name: tf-test-hub_56529\n description: A sample hub\n labels:\n label-two: value-one\n primary:\n type: gcp:networkconnectivity:Spoke\n properties:\n name: tf-test-name_75413\n location: us-central1\n description: A sample spoke with a linked routher appliance instance\n labels:\n label-one: value-one\n hub: ${basicHub.id}\n linkedRouterApplianceInstances:\n instances:\n - virtualMachine: ${instance.selfLink}\n ipAddress: 10.0.0.2\n siteToSiteDataTransfer: true\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nSpoke can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/spokes/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, Spoke can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:networkconnectivity/spoke:Spoke default projects/{{project}}/locations/{{location}}/spokes/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networkconnectivity/spoke:Spoke default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networkconnectivity/spoke:Spoke default {{location}}/{{name}}\n```\n\n", + "description": "The NetworkConnectivity Spoke resource\n\n\nTo get more information about Spoke, see:\n\n* [API documentation](https://cloud.google.com/network-connectivity/docs/reference/networkconnectivity/rest/v1beta/projects.locations.spokes)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/network-connectivity/docs/network-connectivity-center/concepts/overview)\n\n## Example Usage\n\n### Network Connectivity Spoke Linked Vpc Network Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst network = new gcp.compute.Network(\"network\", {\n name: \"net\",\n autoCreateSubnetworks: false,\n});\nconst basicHub = new gcp.networkconnectivity.Hub(\"basic_hub\", {\n name: \"hub1\",\n description: \"A sample hub\",\n labels: {\n \"label-two\": \"value-one\",\n },\n});\nconst primary = new gcp.networkconnectivity.Spoke(\"primary\", {\n name: \"spoke1\",\n location: \"global\",\n description: \"A sample spoke with a linked router appliance instance\",\n labels: {\n \"label-one\": \"value-one\",\n },\n hub: basicHub.id,\n linkedVpcNetwork: {\n excludeExportRanges: [\n \"198.51.100.0/24\",\n \"10.10.0.0/16\",\n ],\n includeExportRanges: [\n \"198.51.100.0/23\",\n \"10.0.0.0/8\",\n ],\n uri: network.selfLink,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nnetwork = gcp.compute.Network(\"network\",\n name=\"net\",\n auto_create_subnetworks=False)\nbasic_hub = gcp.networkconnectivity.Hub(\"basic_hub\",\n name=\"hub1\",\n description=\"A sample hub\",\n labels={\n \"label-two\": \"value-one\",\n })\nprimary = gcp.networkconnectivity.Spoke(\"primary\",\n name=\"spoke1\",\n location=\"global\",\n description=\"A sample spoke with a linked router appliance instance\",\n labels={\n \"label-one\": \"value-one\",\n },\n hub=basic_hub.id,\n linked_vpc_network={\n \"exclude_export_ranges\": [\n \"198.51.100.0/24\",\n \"10.10.0.0/16\",\n ],\n \"include_export_ranges\": [\n \"198.51.100.0/23\",\n \"10.0.0.0/8\",\n ],\n \"uri\": network.self_link,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var network = new Gcp.Compute.Network(\"network\", new()\n {\n Name = \"net\",\n AutoCreateSubnetworks = false,\n });\n\n var basicHub = new Gcp.NetworkConnectivity.Hub(\"basic_hub\", new()\n {\n Name = \"hub1\",\n Description = \"A sample hub\",\n Labels = \n {\n { \"label-two\", \"value-one\" },\n },\n });\n\n var primary = new Gcp.NetworkConnectivity.Spoke(\"primary\", new()\n {\n Name = \"spoke1\",\n Location = \"global\",\n Description = \"A sample spoke with a linked router appliance instance\",\n Labels = \n {\n { \"label-one\", \"value-one\" },\n },\n Hub = basicHub.Id,\n LinkedVpcNetwork = new Gcp.NetworkConnectivity.Inputs.SpokeLinkedVpcNetworkArgs\n {\n ExcludeExportRanges = new[]\n {\n \"198.51.100.0/24\",\n \"10.10.0.0/16\",\n },\n IncludeExportRanges = new[]\n {\n \"198.51.100.0/23\",\n \"10.0.0.0/8\",\n },\n Uri = network.SelfLink,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkconnectivity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tnetwork, err := compute.NewNetwork(ctx, \"network\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"net\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbasicHub, err := networkconnectivity.NewHub(ctx, \"basic_hub\", \u0026networkconnectivity.HubArgs{\n\t\t\tName: pulumi.String(\"hub1\"),\n\t\t\tDescription: pulumi.String(\"A sample hub\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-two\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networkconnectivity.NewSpoke(ctx, \"primary\", \u0026networkconnectivity.SpokeArgs{\n\t\t\tName: pulumi.String(\"spoke1\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tDescription: pulumi.String(\"A sample spoke with a linked router appliance instance\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-one\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t\tHub: basicHub.ID(),\n\t\t\tLinkedVpcNetwork: \u0026networkconnectivity.SpokeLinkedVpcNetworkArgs{\n\t\t\t\tExcludeExportRanges: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"198.51.100.0/24\"),\n\t\t\t\t\tpulumi.String(\"10.10.0.0/16\"),\n\t\t\t\t},\n\t\t\t\tIncludeExportRanges: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"198.51.100.0/23\"),\n\t\t\t\t\tpulumi.String(\"10.0.0.0/8\"),\n\t\t\t\t},\n\t\t\t\tUri: network.SelfLink,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.networkconnectivity.Hub;\nimport com.pulumi.gcp.networkconnectivity.HubArgs;\nimport com.pulumi.gcp.networkconnectivity.Spoke;\nimport com.pulumi.gcp.networkconnectivity.SpokeArgs;\nimport com.pulumi.gcp.networkconnectivity.inputs.SpokeLinkedVpcNetworkArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var network = new Network(\"network\", NetworkArgs.builder()\n .name(\"net\")\n .autoCreateSubnetworks(false)\n .build());\n\n var basicHub = new Hub(\"basicHub\", HubArgs.builder()\n .name(\"hub1\")\n .description(\"A sample hub\")\n .labels(Map.of(\"label-two\", \"value-one\"))\n .build());\n\n var primary = new Spoke(\"primary\", SpokeArgs.builder()\n .name(\"spoke1\")\n .location(\"global\")\n .description(\"A sample spoke with a linked router appliance instance\")\n .labels(Map.of(\"label-one\", \"value-one\"))\n .hub(basicHub.id())\n .linkedVpcNetwork(SpokeLinkedVpcNetworkArgs.builder()\n .excludeExportRanges( \n \"198.51.100.0/24\",\n \"10.10.0.0/16\")\n .includeExportRanges( \n \"198.51.100.0/23\",\n \"10.0.0.0/8\")\n .uri(network.selfLink())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n network:\n type: gcp:compute:Network\n properties:\n name: net\n autoCreateSubnetworks: false\n basicHub:\n type: gcp:networkconnectivity:Hub\n name: basic_hub\n properties:\n name: hub1\n description: A sample 
hub\n labels:\n label-two: value-one\n primary:\n type: gcp:networkconnectivity:Spoke\n properties:\n name: spoke1\n location: global\n description: A sample spoke with a linked router appliance instance\n labels:\n label-one: value-one\n hub: ${basicHub.id}\n linkedVpcNetwork:\n excludeExportRanges:\n - 198.51.100.0/24\n - 10.10.0.0/16\n includeExportRanges:\n - 198.51.100.0/23\n - 10.0.0.0/8\n uri: ${network.selfLink}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Connectivity Spoke Router Appliance Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst network = new gcp.compute.Network(\"network\", {\n name: \"tf-test-network_2067\",\n autoCreateSubnetworks: false,\n});\nconst subnetwork = new gcp.compute.Subnetwork(\"subnetwork\", {\n name: \"tf-test-subnet_40785\",\n ipCidrRange: \"10.0.0.0/28\",\n region: \"us-central1\",\n network: network.selfLink,\n});\nconst instance = new gcp.compute.Instance(\"instance\", {\n name: \"tf-test-instance_79169\",\n machineType: \"e2-medium\",\n canIpForward: true,\n zone: \"us-central1-a\",\n bootDisk: {\n initializeParams: {\n image: \"projects/debian-cloud/global/images/debian-10-buster-v20210817\",\n },\n },\n networkInterfaces: [{\n subnetwork: subnetwork.name,\n networkIp: \"10.0.0.2\",\n accessConfigs: [{\n networkTier: \"PREMIUM\",\n }],\n }],\n});\nconst basicHub = new gcp.networkconnectivity.Hub(\"basic_hub\", {\n name: \"tf-test-hub_56529\",\n description: \"A sample hub\",\n labels: {\n \"label-two\": \"value-one\",\n },\n});\nconst primary = new gcp.networkconnectivity.Spoke(\"primary\", {\n name: \"tf-test-name_75413\",\n location: \"us-central1\",\n description: \"A sample spoke with a linked router appliance instance\",\n labels: {\n \"label-one\": \"value-one\",\n },\n hub: basicHub.id,\n linkedRouterApplianceInstances: {\n instances: [{\n virtualMachine: instance.selfLink,\n ipAddress: \"10.0.0.2\",\n }],\n siteToSiteDataTransfer: true,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nnetwork = gcp.compute.Network(\"network\",\n name=\"tf-test-network_2067\",\n auto_create_subnetworks=False)\nsubnetwork = gcp.compute.Subnetwork(\"subnetwork\",\n name=\"tf-test-subnet_40785\",\n ip_cidr_range=\"10.0.0.0/28\",\n region=\"us-central1\",\n network=network.self_link)\ninstance = gcp.compute.Instance(\"instance\",\n name=\"tf-test-instance_79169\",\n machine_type=\"e2-medium\",\n can_ip_forward=True,\n zone=\"us-central1-a\",\n boot_disk={\n \"initialize_params\": {\n \"image\": \"projects/debian-cloud/global/images/debian-10-buster-v20210817\",\n },\n },\n network_interfaces=[{\n \"subnetwork\": subnetwork.name,\n \"network_ip\": \"10.0.0.2\",\n \"access_configs\": [{\n \"network_tier\": \"PREMIUM\",\n }],\n }])\nbasic_hub = gcp.networkconnectivity.Hub(\"basic_hub\",\n name=\"tf-test-hub_56529\",\n description=\"A sample hub\",\n labels={\n \"label-two\": \"value-one\",\n })\nprimary = gcp.networkconnectivity.Spoke(\"primary\",\n name=\"tf-test-name_75413\",\n location=\"us-central1\",\n description=\"A sample spoke with a linked router appliance instance\",\n labels={\n \"label-one\": \"value-one\",\n },\n hub=basic_hub.id,\n linked_router_appliance_instances={\n \"instances\": [{\n \"virtual_machine\": instance.self_link,\n \"ip_address\": \"10.0.0.2\",\n }],\n \"site_to_site_data_transfer\": True,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing 
Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var network = new Gcp.Compute.Network(\"network\", new()\n {\n Name = \"tf-test-network_2067\",\n AutoCreateSubnetworks = false,\n });\n\n var subnetwork = new Gcp.Compute.Subnetwork(\"subnetwork\", new()\n {\n Name = \"tf-test-subnet_40785\",\n IpCidrRange = \"10.0.0.0/28\",\n Region = \"us-central1\",\n Network = network.SelfLink,\n });\n\n var instance = new Gcp.Compute.Instance(\"instance\", new()\n {\n Name = \"tf-test-instance_79169\",\n MachineType = \"e2-medium\",\n CanIpForward = true,\n Zone = \"us-central1-a\",\n BootDisk = new Gcp.Compute.Inputs.InstanceBootDiskArgs\n {\n InitializeParams = new Gcp.Compute.Inputs.InstanceBootDiskInitializeParamsArgs\n {\n Image = \"projects/debian-cloud/global/images/debian-10-buster-v20210817\",\n },\n },\n NetworkInterfaces = new[]\n {\n new Gcp.Compute.Inputs.InstanceNetworkInterfaceArgs\n {\n Subnetwork = subnetwork.Name,\n NetworkIp = \"10.0.0.2\",\n AccessConfigs = new[]\n {\n new Gcp.Compute.Inputs.InstanceNetworkInterfaceAccessConfigArgs\n {\n NetworkTier = \"PREMIUM\",\n },\n },\n },\n },\n });\n\n var basicHub = new Gcp.NetworkConnectivity.Hub(\"basic_hub\", new()\n {\n Name = \"tf-test-hub_56529\",\n Description = \"A sample hub\",\n Labels = \n {\n { \"label-two\", \"value-one\" },\n },\n });\n\n var primary = new Gcp.NetworkConnectivity.Spoke(\"primary\", new()\n {\n Name = \"tf-test-name_75413\",\n Location = \"us-central1\",\n Description = \"A sample spoke with a linked router appliance instance\",\n Labels = \n {\n { \"label-one\", \"value-one\" },\n },\n Hub = basicHub.Id,\n LinkedRouterApplianceInstances = new Gcp.NetworkConnectivity.Inputs.SpokeLinkedRouterApplianceInstancesArgs\n {\n Instances = new[]\n {\n new Gcp.NetworkConnectivity.Inputs.SpokeLinkedRouterApplianceInstancesInstanceArgs\n {\n VirtualMachine = instance.SelfLink,\n IpAddress = \"10.0.0.2\",\n },\n },\n SiteToSiteDataTransfer = true,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkconnectivity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tnetwork, err := compute.NewNetwork(ctx, \"network\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"tf-test-network_2067\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsubnetwork, err := compute.NewSubnetwork(ctx, \"subnetwork\", \u0026compute.SubnetworkArgs{\n\t\t\tName: pulumi.String(\"tf-test-subnet_40785\"),\n\t\t\tIpCidrRange: pulumi.String(\"10.0.0.0/28\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNetwork: network.SelfLink,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinstance, err := compute.NewInstance(ctx, \"instance\", \u0026compute.InstanceArgs{\n\t\t\tName: pulumi.String(\"tf-test-instance_79169\"),\n\t\t\tMachineType: pulumi.String(\"e2-medium\"),\n\t\t\tCanIpForward: pulumi.Bool(true),\n\t\t\tZone: pulumi.String(\"us-central1-a\"),\n\t\t\tBootDisk: \u0026compute.InstanceBootDiskArgs{\n\t\t\t\tInitializeParams: \u0026compute.InstanceBootDiskInitializeParamsArgs{\n\t\t\t\t\tImage: pulumi.String(\"projects/debian-cloud/global/images/debian-10-buster-v20210817\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tNetworkInterfaces: compute.InstanceNetworkInterfaceArray{\n\t\t\t\t\u0026compute.InstanceNetworkInterfaceArgs{\n\t\t\t\t\tSubnetwork: 
subnetwork.Name,\n\t\t\t\t\tNetworkIp: pulumi.String(\"10.0.0.2\"),\n\t\t\t\t\tAccessConfigs: compute.InstanceNetworkInterfaceAccessConfigArray{\n\t\t\t\t\t\t\u0026compute.InstanceNetworkInterfaceAccessConfigArgs{\n\t\t\t\t\t\t\tNetworkTier: pulumi.String(\"PREMIUM\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbasicHub, err := networkconnectivity.NewHub(ctx, \"basic_hub\", \u0026networkconnectivity.HubArgs{\n\t\t\tName: pulumi.String(\"tf-test-hub_56529\"),\n\t\t\tDescription: pulumi.String(\"A sample hub\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-two\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networkconnectivity.NewSpoke(ctx, \"primary\", \u0026networkconnectivity.SpokeArgs{\n\t\t\tName: pulumi.String(\"tf-test-name_75413\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tDescription: pulumi.String(\"A sample spoke with a linked router appliance instance\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"label-one\": pulumi.String(\"value-one\"),\n\t\t\t},\n\t\t\tHub: basicHub.ID(),\n\t\t\tLinkedRouterApplianceInstances: \u0026networkconnectivity.SpokeLinkedRouterApplianceInstancesArgs{\n\t\t\t\tInstances: networkconnectivity.SpokeLinkedRouterApplianceInstancesInstanceArray{\n\t\t\t\t\t\u0026networkconnectivity.SpokeLinkedRouterApplianceInstancesInstanceArgs{\n\t\t\t\t\t\tVirtualMachine: instance.SelfLink,\n\t\t\t\t\t\tIpAddress: pulumi.String(\"10.0.0.2\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSiteToSiteDataTransfer: pulumi.Bool(true),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.Subnetwork;\nimport com.pulumi.gcp.compute.SubnetworkArgs;\nimport com.pulumi.gcp.compute.Instance;\nimport com.pulumi.gcp.compute.InstanceArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceBootDiskInitializeParamsArgs;\nimport com.pulumi.gcp.compute.inputs.InstanceNetworkInterfaceArgs;\nimport com.pulumi.gcp.networkconnectivity.Hub;\nimport com.pulumi.gcp.networkconnectivity.HubArgs;\nimport com.pulumi.gcp.networkconnectivity.Spoke;\nimport com.pulumi.gcp.networkconnectivity.SpokeArgs;\nimport com.pulumi.gcp.networkconnectivity.inputs.SpokeLinkedRouterApplianceInstancesArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var network = new Network(\"network\", NetworkArgs.builder()\n .name(\"tf-test-network_2067\")\n .autoCreateSubnetworks(false)\n .build());\n\n var subnetwork = new Subnetwork(\"subnetwork\", SubnetworkArgs.builder()\n .name(\"tf-test-subnet_40785\")\n .ipCidrRange(\"10.0.0.0/28\")\n .region(\"us-central1\")\n .network(network.selfLink())\n .build());\n\n var instance = new Instance(\"instance\", InstanceArgs.builder()\n .name(\"tf-test-instance_79169\")\n .machineType(\"e2-medium\")\n .canIpForward(true)\n .zone(\"us-central1-a\")\n .bootDisk(InstanceBootDiskArgs.builder()\n .initializeParams(InstanceBootDiskInitializeParamsArgs.builder()\n 
.image(\"projects/debian-cloud/global/images/debian-10-buster-v20210817\")\n .build())\n .build())\n .networkInterfaces(InstanceNetworkInterfaceArgs.builder()\n .subnetwork(subnetwork.name())\n .networkIp(\"10.0.0.2\")\n .accessConfigs(InstanceNetworkInterfaceAccessConfigArgs.builder()\n .networkTier(\"PREMIUM\")\n .build())\n .build())\n .build());\n\n var basicHub = new Hub(\"basicHub\", HubArgs.builder()\n .name(\"tf-test-hub_56529\")\n .description(\"A sample hub\")\n .labels(Map.of(\"label-two\", \"value-one\"))\n .build());\n\n var primary = new Spoke(\"primary\", SpokeArgs.builder()\n .name(\"tf-test-name_75413\")\n .location(\"us-central1\")\n .description(\"A sample spoke with a linked routher appliance instance\")\n .labels(Map.of(\"label-one\", \"value-one\"))\n .hub(basicHub.id())\n .linkedRouterApplianceInstances(SpokeLinkedRouterApplianceInstancesArgs.builder()\n .instances(SpokeLinkedRouterApplianceInstancesInstanceArgs.builder()\n .virtualMachine(instance.selfLink())\n .ipAddress(\"10.0.0.2\")\n .build())\n .siteToSiteDataTransfer(true)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n network:\n type: gcp:compute:Network\n properties:\n name: tf-test-network_2067\n autoCreateSubnetworks: false\n subnetwork:\n type: gcp:compute:Subnetwork\n properties:\n name: tf-test-subnet_40785\n ipCidrRange: 10.0.0.0/28\n region: us-central1\n network: ${network.selfLink}\n instance:\n type: gcp:compute:Instance\n properties:\n name: tf-test-instance_79169\n machineType: e2-medium\n canIpForward: true\n zone: us-central1-a\n bootDisk:\n initializeParams:\n image: projects/debian-cloud/global/images/debian-10-buster-v20210817\n networkInterfaces:\n - subnetwork: ${subnetwork.name}\n networkIp: 10.0.0.2\n accessConfigs:\n - networkTier: PREMIUM\n basicHub:\n type: gcp:networkconnectivity:Hub\n name: basic_hub\n properties:\n name: tf-test-hub_56529\n description: A sample hub\n labels:\n label-two: value-one\n primary:\n type: gcp:networkconnectivity:Spoke\n properties:\n name: tf-test-name_75413\n location: us-central1\n description: A sample spoke with a linked routher appliance instance\n labels:\n label-one: value-one\n hub: ${basicHub.id}\n linkedRouterApplianceInstances:\n instances:\n - virtualMachine: ${instance.selfLink}\n ipAddress: 10.0.0.2\n siteToSiteDataTransfer: true\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nSpoke can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/spokes/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, Spoke can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:networkconnectivity/spoke:Spoke default projects/{{project}}/locations/{{location}}/spokes/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networkconnectivity/spoke:Spoke default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networkconnectivity/spoke:Spoke default {{location}}/{{name}}\n```\n\n", "properties": { "createTime": { "type": "string", @@ -221054,7 +222634,7 @@ } }, "gcp:networksecurity/clientTlsPolicy:ClientTlsPolicy": { - "description": "## Example Usage\n\n### Network Security Client Tls Policy Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ClientTlsPolicy(\"default\", {\n name: \"my-client-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my description\",\n sni: \"secure.example.com\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.networksecurity.ClientTlsPolicy(\"default\",\n name=\"my-client-tls-policy\",\n labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n sni=\"secure.example.com\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ClientTlsPolicy(\"default\", new()\n {\n Name = \"my-client-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n Sni = \"secure.example.com\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewClientTlsPolicy(ctx, \"default\", \u0026networksecurity.ClientTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-client-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tSni: pulumi.String(\"secure.example.com\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ClientTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ClientTlsPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ClientTlsPolicy(\"default\", ClientTlsPolicyArgs.builder()\n .name(\"my-client-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .sni(\"secure.example.com\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ClientTlsPolicy\n properties:\n name: my-client-tls-policy\n labels:\n foo: bar\n description: my description\n sni: secure.example.com\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Security Client Tls Policy Advanced\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ClientTlsPolicy(\"default\", {\n 
name: \"my-client-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my description\",\n clientCertificate: {\n certificateProviderInstance: {\n pluginInstance: \"google_cloud_private_spiffe\",\n },\n },\n serverValidationCas: [\n {\n grpcEndpoint: {\n targetUri: \"unix:mypath\",\n },\n },\n {\n grpcEndpoint: {\n targetUri: \"unix:mypath1\",\n },\n },\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.networksecurity.ClientTlsPolicy(\"default\",\n name=\"my-client-tls-policy\",\n labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n client_certificate={\n \"certificate_provider_instance\": {\n \"plugin_instance\": \"google_cloud_private_spiffe\",\n },\n },\n server_validation_cas=[\n {\n \"grpc_endpoint\": {\n \"target_uri\": \"unix:mypath\",\n },\n },\n {\n \"grpc_endpoint\": {\n \"target_uri\": \"unix:mypath1\",\n },\n },\n ])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ClientTlsPolicy(\"default\", new()\n {\n Name = \"my-client-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n ClientCertificate = new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyClientCertificateArgs\n {\n CertificateProviderInstance = new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs\n {\n PluginInstance = \"google_cloud_private_spiffe\",\n },\n },\n ServerValidationCas = new[]\n {\n new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyServerValidationCaArgs\n {\n GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyServerValidationCaGrpcEndpointArgs\n {\n TargetUri = \"unix:mypath\",\n },\n },\n new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyServerValidationCaArgs\n {\n GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyServerValidationCaGrpcEndpointArgs\n {\n TargetUri = \"unix:mypath1\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewClientTlsPolicy(ctx, \"default\", \u0026networksecurity.ClientTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-client-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tClientCertificate: \u0026networksecurity.ClientTlsPolicyClientCertificateArgs{\n\t\t\t\tCertificateProviderInstance: \u0026networksecurity.ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs{\n\t\t\t\t\tPluginInstance: pulumi.String(\"google_cloud_private_spiffe\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tServerValidationCas: networksecurity.ClientTlsPolicyServerValidationCaArray{\n\t\t\t\t\u0026networksecurity.ClientTlsPolicyServerValidationCaArgs{\n\t\t\t\t\tGrpcEndpoint: \u0026networksecurity.ClientTlsPolicyServerValidationCaGrpcEndpointArgs{\n\t\t\t\t\t\tTargetUri: pulumi.String(\"unix:mypath\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026networksecurity.ClientTlsPolicyServerValidationCaArgs{\n\t\t\t\t\tGrpcEndpoint: \u0026networksecurity.ClientTlsPolicyServerValidationCaGrpcEndpointArgs{\n\t\t\t\t\t\tTargetUri: pulumi.String(\"unix:mypath1\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn 
nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ClientTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ClientTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ClientTlsPolicyClientCertificateArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ClientTlsPolicyServerValidationCaArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ClientTlsPolicyServerValidationCaGrpcEndpointArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ClientTlsPolicy(\"default\", ClientTlsPolicyArgs.builder()\n .name(\"my-client-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .clientCertificate(ClientTlsPolicyClientCertificateArgs.builder()\n .certificateProviderInstance(ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs.builder()\n .pluginInstance(\"google_cloud_private_spiffe\")\n .build())\n .build())\n .serverValidationCas( \n ClientTlsPolicyServerValidationCaArgs.builder()\n .grpcEndpoint(ClientTlsPolicyServerValidationCaGrpcEndpointArgs.builder()\n .targetUri(\"unix:mypath\")\n .build())\n .build(),\n ClientTlsPolicyServerValidationCaArgs.builder()\n .grpcEndpoint(ClientTlsPolicyServerValidationCaGrpcEndpointArgs.builder()\n .targetUri(\"unix:mypath1\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ClientTlsPolicy\n properties:\n name: my-client-tls-policy\n labels:\n foo: bar\n description: my description\n clientCertificate:\n certificateProviderInstance:\n pluginInstance: google_cloud_private_spiffe\n serverValidationCas:\n - grpcEndpoint:\n targetUri: unix:mypath\n - grpcEndpoint:\n targetUri: unix:mypath1\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nClientTlsPolicy can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, ClientTlsPolicy can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:networksecurity/clientTlsPolicy:ClientTlsPolicy default projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networksecurity/clientTlsPolicy:ClientTlsPolicy default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networksecurity/clientTlsPolicy:ClientTlsPolicy default {{location}}/{{name}}\n```\n\n", + "description": "ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. 
This resource itself does not affect configuration unless it is attached to a backend service resource.\n\n\nTo get more information about ClientTlsPolicy, see:\n\n* [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.clientTlsPolicies)\n* How-to Guides\n * [Service Security](https://cloud.google.com/traffic-director/docs/security-use-cases)\n\n## Example Usage\n\n### Network Security Client Tls Policy Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ClientTlsPolicy(\"default\", {\n name: \"my-client-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my description\",\n sni: \"secure.example.com\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.networksecurity.ClientTlsPolicy(\"default\",\n name=\"my-client-tls-policy\",\n labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n sni=\"secure.example.com\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ClientTlsPolicy(\"default\", new()\n {\n Name = \"my-client-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n Sni = \"secure.example.com\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewClientTlsPolicy(ctx, \"default\", \u0026networksecurity.ClientTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-client-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tSni: pulumi.String(\"secure.example.com\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ClientTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ClientTlsPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ClientTlsPolicy(\"default\", ClientTlsPolicyArgs.builder()\n .name(\"my-client-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .sni(\"secure.example.com\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ClientTlsPolicy\n properties:\n name: my-client-tls-policy\n labels:\n foo: bar\n description: my description\n sni: secure.example.com\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Security Client Tls Policy Advanced\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ClientTlsPolicy(\"default\", {\n name: \"my-client-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my 
description\",\n clientCertificate: {\n certificateProviderInstance: {\n pluginInstance: \"google_cloud_private_spiffe\",\n },\n },\n serverValidationCas: [{\n grpcEndpoint: {\n targetUri: \"unix:mypath\",\n },\n }],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.networksecurity.ClientTlsPolicy(\"default\",\n name=\"my-client-tls-policy\",\n labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n client_certificate={\n \"certificate_provider_instance\": {\n \"plugin_instance\": \"google_cloud_private_spiffe\",\n },\n },\n server_validation_cas=[{\n \"grpc_endpoint\": {\n \"target_uri\": \"unix:mypath\",\n },\n }])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ClientTlsPolicy(\"default\", new()\n {\n Name = \"my-client-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n ClientCertificate = new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyClientCertificateArgs\n {\n CertificateProviderInstance = new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs\n {\n PluginInstance = \"google_cloud_private_spiffe\",\n },\n },\n ServerValidationCas = new[]\n {\n new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyServerValidationCaArgs\n {\n GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyServerValidationCaGrpcEndpointArgs\n {\n TargetUri = \"unix:mypath\",\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewClientTlsPolicy(ctx, \"default\", \u0026networksecurity.ClientTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-client-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tClientCertificate: \u0026networksecurity.ClientTlsPolicyClientCertificateArgs{\n\t\t\t\tCertificateProviderInstance: \u0026networksecurity.ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs{\n\t\t\t\t\tPluginInstance: pulumi.String(\"google_cloud_private_spiffe\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tServerValidationCas: networksecurity.ClientTlsPolicyServerValidationCaArray{\n\t\t\t\t\u0026networksecurity.ClientTlsPolicyServerValidationCaArgs{\n\t\t\t\t\tGrpcEndpoint: \u0026networksecurity.ClientTlsPolicyServerValidationCaGrpcEndpointArgs{\n\t\t\t\t\t\tTargetUri: pulumi.String(\"unix:mypath\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ClientTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ClientTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ClientTlsPolicyClientCertificateArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ClientTlsPolicyServerValidationCaArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ClientTlsPolicyServerValidationCaGrpcEndpointArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport 
java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ClientTlsPolicy(\"default\", ClientTlsPolicyArgs.builder()\n .name(\"my-client-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .clientCertificate(ClientTlsPolicyClientCertificateArgs.builder()\n .certificateProviderInstance(ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs.builder()\n .pluginInstance(\"google_cloud_private_spiffe\")\n .build())\n .build())\n .serverValidationCas(ClientTlsPolicyServerValidationCaArgs.builder()\n .grpcEndpoint(ClientTlsPolicyServerValidationCaGrpcEndpointArgs.builder()\n .targetUri(\"unix:mypath\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ClientTlsPolicy\n properties:\n name: my-client-tls-policy\n labels:\n foo: bar\n description: my description\n clientCertificate:\n certificateProviderInstance:\n pluginInstance: google_cloud_private_spiffe\n serverValidationCas:\n - grpcEndpoint:\n targetUri: unix:mypath\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nClientTlsPolicy can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, ClientTlsPolicy can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:networksecurity/clientTlsPolicy:ClientTlsPolicy default projects/{{project}}/locations/{{location}}/clientTlsPolicies/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networksecurity/clientTlsPolicy:ClientTlsPolicy default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networksecurity/clientTlsPolicy:ClientTlsPolicy default {{location}}/{{name}}\n```\n\n", "properties": { "clientCertificate": { "$ref": "#/types/gcp:networksecurity/ClientTlsPolicyClientCertificate:ClientTlsPolicyClientCertificate", @@ -222307,7 +223887,7 @@ } }, "gcp:networksecurity/serverTlsPolicy:ServerTlsPolicy": { - "description": "## Example Usage\n\n### Network Security Server Tls Policy Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ServerTlsPolicy(\"default\", {\n name: \"my-server-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my description\",\n allowOpen: false,\n serverCertificate: {\n certificateProviderInstance: {\n pluginInstance: \"google_cloud_private_spiffe\",\n },\n },\n mtlsPolicy: {\n clientValidationCas: [\n {\n grpcEndpoint: {\n targetUri: \"unix:mypath\",\n },\n },\n {\n grpcEndpoint: {\n targetUri: \"unix:abc/mypath\",\n },\n },\n {\n certificateProviderInstance: {\n pluginInstance: \"google_cloud_private_spiffe\",\n },\n },\n ],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.networksecurity.ServerTlsPolicy(\"default\",\n name=\"my-server-tls-policy\",\n labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n allow_open=False,\n server_certificate={\n \"certificate_provider_instance\": {\n \"plugin_instance\": \"google_cloud_private_spiffe\",\n },\n },\n mtls_policy={\n \"client_validation_cas\": [\n {\n \"grpc_endpoint\": {\n \"target_uri\": \"unix:mypath\",\n },\n 
},\n {\n \"grpc_endpoint\": {\n \"target_uri\": \"unix:abc/mypath\",\n },\n },\n {\n \"certificate_provider_instance\": {\n \"plugin_instance\": \"google_cloud_private_spiffe\",\n },\n },\n ],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ServerTlsPolicy(\"default\", new()\n {\n Name = \"my-server-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n AllowOpen = false,\n ServerCertificate = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyServerCertificateArgs\n {\n CertificateProviderInstance = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs\n {\n PluginInstance = \"google_cloud_private_spiffe\",\n },\n },\n MtlsPolicy = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyArgs\n {\n ClientValidationCas = new[]\n {\n new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaArgs\n {\n GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs\n {\n TargetUri = \"unix:mypath\",\n },\n },\n new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaArgs\n {\n GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs\n {\n TargetUri = \"unix:abc/mypath\",\n },\n },\n new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaArgs\n {\n CertificateProviderInstance = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs\n {\n PluginInstance = \"google_cloud_private_spiffe\",\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewServerTlsPolicy(ctx, \"default\", \u0026networksecurity.ServerTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-server-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tAllowOpen: pulumi.Bool(false),\n\t\t\tServerCertificate: \u0026networksecurity.ServerTlsPolicyServerCertificateArgs{\n\t\t\t\tCertificateProviderInstance: \u0026networksecurity.ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs{\n\t\t\t\t\tPluginInstance: pulumi.String(\"google_cloud_private_spiffe\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tMtlsPolicy: \u0026networksecurity.ServerTlsPolicyMtlsPolicyArgs{\n\t\t\t\tClientValidationCas: networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaArray{\n\t\t\t\t\t\u0026networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaArgs{\n\t\t\t\t\t\tGrpcEndpoint: \u0026networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs{\n\t\t\t\t\t\t\tTargetUri: pulumi.String(\"unix:mypath\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\u0026networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaArgs{\n\t\t\t\t\t\tGrpcEndpoint: \u0026networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs{\n\t\t\t\t\t\t\tTargetUri: pulumi.String(\"unix:abc/mypath\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\u0026networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaArgs{\n\t\t\t\t\t\tCertificateProviderInstance: 
\u0026networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs{\n\t\t\t\t\t\t\tPluginInstance: pulumi.String(\"google_cloud_private_spiffe\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyServerCertificateArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyMtlsPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ServerTlsPolicy(\"default\", ServerTlsPolicyArgs.builder()\n .name(\"my-server-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .allowOpen(\"false\")\n .serverCertificate(ServerTlsPolicyServerCertificateArgs.builder()\n .certificateProviderInstance(ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs.builder()\n .pluginInstance(\"google_cloud_private_spiffe\")\n .build())\n .build())\n .mtlsPolicy(ServerTlsPolicyMtlsPolicyArgs.builder()\n .clientValidationCas( \n ServerTlsPolicyMtlsPolicyClientValidationCaArgs.builder()\n .grpcEndpoint(ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs.builder()\n .targetUri(\"unix:mypath\")\n .build())\n .build(),\n ServerTlsPolicyMtlsPolicyClientValidationCaArgs.builder()\n .grpcEndpoint(ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs.builder()\n .targetUri(\"unix:abc/mypath\")\n .build())\n .build(),\n ServerTlsPolicyMtlsPolicyClientValidationCaArgs.builder()\n .certificateProviderInstance(ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs.builder()\n .pluginInstance(\"google_cloud_private_spiffe\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ServerTlsPolicy\n properties:\n name: my-server-tls-policy\n labels:\n foo: bar\n description: my description\n allowOpen: 'false'\n serverCertificate:\n certificateProviderInstance:\n pluginInstance: google_cloud_private_spiffe\n mtlsPolicy:\n clientValidationCas:\n - grpcEndpoint:\n targetUri: unix:mypath\n - grpcEndpoint:\n targetUri: unix:abc/mypath\n - certificateProviderInstance:\n pluginInstance: google_cloud_private_spiffe\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Security Server Tls Policy Advanced\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ServerTlsPolicy(\"default\", {\n name: \"my-server-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my description\",\n location: \"global\",\n allowOpen: false,\n mtlsPolicy: {\n clientValidationMode: \"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.networksecurity.ServerTlsPolicy(\"default\",\n name=\"my-server-tls-policy\",\n 
labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n location=\"global\",\n allow_open=False,\n mtls_policy={\n \"client_validation_mode\": \"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ServerTlsPolicy(\"default\", new()\n {\n Name = \"my-server-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n Location = \"global\",\n AllowOpen = false,\n MtlsPolicy = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyArgs\n {\n ClientValidationMode = \"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewServerTlsPolicy(ctx, \"default\", \u0026networksecurity.ServerTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-server-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tAllowOpen: pulumi.Bool(false),\n\t\t\tMtlsPolicy: \u0026networksecurity.ServerTlsPolicyMtlsPolicyArgs{\n\t\t\t\tClientValidationMode: pulumi.String(\"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyMtlsPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ServerTlsPolicy(\"default\", ServerTlsPolicyArgs.builder()\n .name(\"my-server-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .location(\"global\")\n .allowOpen(\"false\")\n .mtlsPolicy(ServerTlsPolicyMtlsPolicyArgs.builder()\n .clientValidationMode(\"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ServerTlsPolicy\n properties:\n name: my-server-tls-policy\n labels:\n foo: bar\n description: my description\n location: global\n allowOpen: 'false'\n mtlsPolicy:\n clientValidationMode: ALLOW_INVALID_OR_MISSING_CLIENT_CERT\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Security Server Tls Policy Server Cert\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ServerTlsPolicy(\"default\", {\n name: \"my-server-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my description\",\n location: \"global\",\n allowOpen: false,\n serverCertificate: {\n grpcEndpoint: {\n targetUri: \"unix:mypath\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = 
gcp.networksecurity.ServerTlsPolicy(\"default\",\n name=\"my-server-tls-policy\",\n labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n location=\"global\",\n allow_open=False,\n server_certificate={\n \"grpc_endpoint\": {\n \"target_uri\": \"unix:mypath\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ServerTlsPolicy(\"default\", new()\n {\n Name = \"my-server-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n Location = \"global\",\n AllowOpen = false,\n ServerCertificate = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyServerCertificateArgs\n {\n GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyServerCertificateGrpcEndpointArgs\n {\n TargetUri = \"unix:mypath\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewServerTlsPolicy(ctx, \"default\", \u0026networksecurity.ServerTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-server-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tAllowOpen: pulumi.Bool(false),\n\t\t\tServerCertificate: \u0026networksecurity.ServerTlsPolicyServerCertificateArgs{\n\t\t\t\tGrpcEndpoint: \u0026networksecurity.ServerTlsPolicyServerCertificateGrpcEndpointArgs{\n\t\t\t\t\tTargetUri: pulumi.String(\"unix:mypath\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyServerCertificateArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyServerCertificateGrpcEndpointArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ServerTlsPolicy(\"default\", ServerTlsPolicyArgs.builder()\n .name(\"my-server-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .location(\"global\")\n .allowOpen(\"false\")\n .serverCertificate(ServerTlsPolicyServerCertificateArgs.builder()\n .grpcEndpoint(ServerTlsPolicyServerCertificateGrpcEndpointArgs.builder()\n .targetUri(\"unix:mypath\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ServerTlsPolicy\n properties:\n name: my-server-tls-policy\n labels:\n foo: bar\n description: my description\n location: global\n allowOpen: 'false'\n serverCertificate:\n grpcEndpoint:\n targetUri: unix:mypath\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Security Server Tls Policy Mtls\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from 
\"@pulumi/gcp\";\nimport * as std from \"@pulumi/std\";\n\nconst project = gcp.organizations.getProject({});\nconst defaultTrustConfig = new gcp.certificatemanager.TrustConfig(\"default\", {\n name: \"my-trust-config\",\n description: \"sample trust config description\",\n location: \"global\",\n trustStores: [{\n trustAnchors: [{\n pemCertificate: std.file({\n input: \"test-fixtures/ca_cert.pem\",\n }).then(invoke =\u003e invoke.result),\n }],\n intermediateCas: [{\n pemCertificate: std.file({\n input: \"test-fixtures/ca_cert.pem\",\n }).then(invoke =\u003e invoke.result),\n }],\n }],\n labels: {\n foo: \"bar\",\n },\n});\nconst _default = new gcp.networksecurity.ServerTlsPolicy(\"default\", {\n name: \"my-server-tls-policy\",\n description: \"my description\",\n location: \"global\",\n allowOpen: false,\n mtlsPolicy: {\n clientValidationMode: \"REJECT_INVALID\",\n clientValidationTrustConfig: pulumi.all([project, defaultTrustConfig.name]).apply(([project, name]) =\u003e `projects/${project.number}/locations/global/trustConfigs/${name}`),\n },\n labels: {\n foo: \"bar\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_std as std\n\nproject = gcp.organizations.get_project()\ndefault_trust_config = gcp.certificatemanager.TrustConfig(\"default\",\n name=\"my-trust-config\",\n description=\"sample trust config description\",\n location=\"global\",\n trust_stores=[{\n \"trust_anchors\": [{\n \"pem_certificate\": std.file(input=\"test-fixtures/ca_cert.pem\").result,\n }],\n \"intermediate_cas\": [{\n \"pem_certificate\": std.file(input=\"test-fixtures/ca_cert.pem\").result,\n }],\n }],\n labels={\n \"foo\": \"bar\",\n })\ndefault = gcp.networksecurity.ServerTlsPolicy(\"default\",\n name=\"my-server-tls-policy\",\n description=\"my description\",\n location=\"global\",\n allow_open=False,\n mtls_policy={\n \"client_validation_mode\": \"REJECT_INVALID\",\n \"client_validation_trust_config\": default_trust_config.name.apply(lambda name: f\"projects/{project.number}/locations/global/trustConfigs/{name}\"),\n },\n labels={\n \"foo\": \"bar\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Std = Pulumi.Std;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var defaultTrustConfig = new Gcp.CertificateManager.TrustConfig(\"default\", new()\n {\n Name = \"my-trust-config\",\n Description = \"sample trust config description\",\n Location = \"global\",\n TrustStores = new[]\n {\n new Gcp.CertificateManager.Inputs.TrustConfigTrustStoreArgs\n {\n TrustAnchors = new[]\n {\n new Gcp.CertificateManager.Inputs.TrustConfigTrustStoreTrustAnchorArgs\n {\n PemCertificate = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/ca_cert.pem\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n },\n IntermediateCas = new[]\n {\n new Gcp.CertificateManager.Inputs.TrustConfigTrustStoreIntermediateCaArgs\n {\n PemCertificate = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/ca_cert.pem\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n },\n },\n },\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n var @default = new Gcp.NetworkSecurity.ServerTlsPolicy(\"default\", new()\n {\n Name = \"my-server-tls-policy\",\n Description = \"my description\",\n Location = \"global\",\n AllowOpen = false,\n MtlsPolicy = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyArgs\n {\n ClientValidationMode = \"REJECT_INVALID\",\n ClientValidationTrustConfig = 
Output.Tuple(project, defaultTrustConfig.Name).Apply(values =\u003e\n {\n var project = values.Item1;\n var name = values.Item2;\n return $\"projects/{project.Apply(getProjectResult =\u003e getProjectResult.Number)}/locations/global/trustConfigs/{name}\";\n }),\n },\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/certificatemanager\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-std/sdk/go/std\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/ca_cert.pem\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile1, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/ca_cert.pem\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultTrustConfig, err := certificatemanager.NewTrustConfig(ctx, \"default\", \u0026certificatemanager.TrustConfigArgs{\n\t\t\tName: pulumi.String(\"my-trust-config\"),\n\t\t\tDescription: pulumi.String(\"sample trust config description\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tTrustStores: certificatemanager.TrustConfigTrustStoreArray{\n\t\t\t\t\u0026certificatemanager.TrustConfigTrustStoreArgs{\n\t\t\t\t\tTrustAnchors: certificatemanager.TrustConfigTrustStoreTrustAnchorArray{\n\t\t\t\t\t\t\u0026certificatemanager.TrustConfigTrustStoreTrustAnchorArgs{\n\t\t\t\t\t\t\tPemCertificate: pulumi.String(invokeFile.Result),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIntermediateCas: certificatemanager.TrustConfigTrustStoreIntermediateCaArray{\n\t\t\t\t\t\t\u0026certificatemanager.TrustConfigTrustStoreIntermediateCaArgs{\n\t\t\t\t\t\t\tPemCertificate: pulumi.String(invokeFile1.Result),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networksecurity.NewServerTlsPolicy(ctx, \"default\", \u0026networksecurity.ServerTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-server-tls-policy\"),\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tAllowOpen: pulumi.Bool(false),\n\t\t\tMtlsPolicy: \u0026networksecurity.ServerTlsPolicyMtlsPolicyArgs{\n\t\t\t\tClientValidationMode: pulumi.String(\"REJECT_INVALID\"),\n\t\t\t\tClientValidationTrustConfig: defaultTrustConfig.Name.ApplyT(func(name string) (string, error) {\n\t\t\t\t\treturn fmt.Sprintf(\"projects/%v/locations/global/trustConfigs/%v\", project.Number, name), nil\n\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.certificatemanager.TrustConfig;\nimport com.pulumi.gcp.certificatemanager.TrustConfigArgs;\nimport 
com.pulumi.gcp.certificatemanager.inputs.TrustConfigTrustStoreArgs;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyMtlsPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var defaultTrustConfig = new TrustConfig(\"defaultTrustConfig\", TrustConfigArgs.builder()\n .name(\"my-trust-config\")\n .description(\"sample trust config description\")\n .location(\"global\")\n .trustStores(TrustConfigTrustStoreArgs.builder()\n .trustAnchors(TrustConfigTrustStoreTrustAnchorArgs.builder()\n .pemCertificate(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/ca_cert.pem\")\n .build()).result())\n .build())\n .intermediateCas(TrustConfigTrustStoreIntermediateCaArgs.builder()\n .pemCertificate(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/ca_cert.pem\")\n .build()).result())\n .build())\n .build())\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n var default_ = new ServerTlsPolicy(\"default\", ServerTlsPolicyArgs.builder()\n .name(\"my-server-tls-policy\")\n .description(\"my description\")\n .location(\"global\")\n .allowOpen(\"false\")\n .mtlsPolicy(ServerTlsPolicyMtlsPolicyArgs.builder()\n .clientValidationMode(\"REJECT_INVALID\")\n .clientValidationTrustConfig(defaultTrustConfig.name().applyValue(name -\u003e String.format(\"projects/%s/locations/global/trustConfigs/%s\", project.applyValue(getProjectResult -\u003e getProjectResult.number()),name)))\n .build())\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ServerTlsPolicy\n properties:\n name: my-server-tls-policy\n description: my description\n location: global\n allowOpen: 'false'\n mtlsPolicy:\n clientValidationMode: REJECT_INVALID\n clientValidationTrustConfig: projects/${project.number}/locations/global/trustConfigs/${defaultTrustConfig.name}\n labels:\n foo: bar\n defaultTrustConfig:\n type: gcp:certificatemanager:TrustConfig\n name: default\n properties:\n name: my-trust-config\n description: sample trust config description\n location: global\n trustStores:\n - trustAnchors:\n - pemCertificate:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/ca_cert.pem\n Return: result\n intermediateCas:\n - pemCertificate:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/ca_cert.pem\n Return: result\n labels:\n foo: bar\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nServerTlsPolicy can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, ServerTlsPolicy can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:networksecurity/serverTlsPolicy:ServerTlsPolicy default projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networksecurity/serverTlsPolicy:ServerTlsPolicy default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networksecurity/serverTlsPolicy:ServerTlsPolicy default {{location}}/{{name}}\n```\n\n", + "description": "ServerTlsPolicy is a resource that specifies how a server should authenticate incoming requests. This resource itself does not affect configuration unless it is attached to a target HTTPS proxy or endpoint config selector resource.\n\n\nTo get more information about ServerTlsPolicy, see:\n\n* [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.serverTlsPolicies)\n\n## Example Usage\n\n### Network Security Server Tls Policy Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ServerTlsPolicy(\"default\", {\n name: \"my-server-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my description\",\n allowOpen: false,\n serverCertificate: {\n certificateProviderInstance: {\n pluginInstance: \"google_cloud_private_spiffe\",\n },\n },\n mtlsPolicy: {\n clientValidationCas: [{\n grpcEndpoint: {\n targetUri: \"unix:mypath\",\n },\n }],\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.networksecurity.ServerTlsPolicy(\"default\",\n name=\"my-server-tls-policy\",\n labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n allow_open=False,\n server_certificate={\n \"certificate_provider_instance\": {\n \"plugin_instance\": \"google_cloud_private_spiffe\",\n },\n },\n mtls_policy={\n \"client_validation_cas\": [{\n \"grpc_endpoint\": {\n \"target_uri\": \"unix:mypath\",\n },\n }],\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ServerTlsPolicy(\"default\", new()\n {\n Name = \"my-server-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n AllowOpen = false,\n ServerCertificate = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyServerCertificateArgs\n {\n CertificateProviderInstance = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs\n {\n PluginInstance = \"google_cloud_private_spiffe\",\n },\n },\n MtlsPolicy = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyArgs\n {\n ClientValidationCas = new[]\n {\n new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaArgs\n {\n GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs\n {\n TargetUri = \"unix:mypath\",\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewServerTlsPolicy(ctx, \"default\", \u0026networksecurity.ServerTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-server-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my 
description\"),\n\t\t\tAllowOpen: pulumi.Bool(false),\n\t\t\tServerCertificate: \u0026networksecurity.ServerTlsPolicyServerCertificateArgs{\n\t\t\t\tCertificateProviderInstance: \u0026networksecurity.ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs{\n\t\t\t\t\tPluginInstance: pulumi.String(\"google_cloud_private_spiffe\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tMtlsPolicy: \u0026networksecurity.ServerTlsPolicyMtlsPolicyArgs{\n\t\t\t\tClientValidationCas: networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaArray{\n\t\t\t\t\t\u0026networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaArgs{\n\t\t\t\t\t\tGrpcEndpoint: \u0026networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs{\n\t\t\t\t\t\t\tTargetUri: pulumi.String(\"unix:mypath\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyServerCertificateArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyMtlsPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ServerTlsPolicy(\"default\", ServerTlsPolicyArgs.builder()\n .name(\"my-server-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .allowOpen(\"false\")\n .serverCertificate(ServerTlsPolicyServerCertificateArgs.builder()\n .certificateProviderInstance(ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs.builder()\n .pluginInstance(\"google_cloud_private_spiffe\")\n .build())\n .build())\n .mtlsPolicy(ServerTlsPolicyMtlsPolicyArgs.builder()\n .clientValidationCas(ServerTlsPolicyMtlsPolicyClientValidationCaArgs.builder()\n .grpcEndpoint(ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs.builder()\n .targetUri(\"unix:mypath\")\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ServerTlsPolicy\n properties:\n name: my-server-tls-policy\n labels:\n foo: bar\n description: my description\n allowOpen: 'false'\n serverCertificate:\n certificateProviderInstance:\n pluginInstance: google_cloud_private_spiffe\n mtlsPolicy:\n clientValidationCas:\n - grpcEndpoint:\n targetUri: unix:mypath\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Security Server Tls Policy Advanced\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ServerTlsPolicy(\"default\", {\n name: \"my-server-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my description\",\n location: \"global\",\n allowOpen: false,\n mtlsPolicy: {\n clientValidationMode: \"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.networksecurity.ServerTlsPolicy(\"default\",\n 
name=\"my-server-tls-policy\",\n labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n location=\"global\",\n allow_open=False,\n mtls_policy={\n \"client_validation_mode\": \"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ServerTlsPolicy(\"default\", new()\n {\n Name = \"my-server-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n Location = \"global\",\n AllowOpen = false,\n MtlsPolicy = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyArgs\n {\n ClientValidationMode = \"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewServerTlsPolicy(ctx, \"default\", \u0026networksecurity.ServerTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-server-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tAllowOpen: pulumi.Bool(false),\n\t\t\tMtlsPolicy: \u0026networksecurity.ServerTlsPolicyMtlsPolicyArgs{\n\t\t\t\tClientValidationMode: pulumi.String(\"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyMtlsPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ServerTlsPolicy(\"default\", ServerTlsPolicyArgs.builder()\n .name(\"my-server-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .location(\"global\")\n .allowOpen(\"false\")\n .mtlsPolicy(ServerTlsPolicyMtlsPolicyArgs.builder()\n .clientValidationMode(\"ALLOW_INVALID_OR_MISSING_CLIENT_CERT\")\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ServerTlsPolicy\n properties:\n name: my-server-tls-policy\n labels:\n foo: bar\n description: my description\n location: global\n allowOpen: 'false'\n mtlsPolicy:\n clientValidationMode: ALLOW_INVALID_OR_MISSING_CLIENT_CERT\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Security Server Tls Policy Server Cert\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.networksecurity.ServerTlsPolicy(\"default\", {\n name: \"my-server-tls-policy\",\n labels: {\n foo: \"bar\",\n },\n description: \"my description\",\n location: \"global\",\n allowOpen: false,\n serverCertificate: {\n grpcEndpoint: {\n targetUri: \"unix:mypath\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport 
pulumi_gcp as gcp\n\ndefault = gcp.networksecurity.ServerTlsPolicy(\"default\",\n name=\"my-server-tls-policy\",\n labels={\n \"foo\": \"bar\",\n },\n description=\"my description\",\n location=\"global\",\n allow_open=False,\n server_certificate={\n \"grpc_endpoint\": {\n \"target_uri\": \"unix:mypath\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.NetworkSecurity.ServerTlsPolicy(\"default\", new()\n {\n Name = \"my-server-tls-policy\",\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n Description = \"my description\",\n Location = \"global\",\n AllowOpen = false,\n ServerCertificate = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyServerCertificateArgs\n {\n GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyServerCertificateGrpcEndpointArgs\n {\n TargetUri = \"unix:mypath\",\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := networksecurity.NewServerTlsPolicy(ctx, \"default\", \u0026networksecurity.ServerTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-server-tls-policy\"),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tAllowOpen: pulumi.Bool(false),\n\t\t\tServerCertificate: \u0026networksecurity.ServerTlsPolicyServerCertificateArgs{\n\t\t\t\tGrpcEndpoint: \u0026networksecurity.ServerTlsPolicyServerCertificateGrpcEndpointArgs{\n\t\t\t\t\tTargetUri: pulumi.String(\"unix:mypath\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyServerCertificateArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyServerCertificateGrpcEndpointArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new ServerTlsPolicy(\"default\", ServerTlsPolicyArgs.builder()\n .name(\"my-server-tls-policy\")\n .labels(Map.of(\"foo\", \"bar\"))\n .description(\"my description\")\n .location(\"global\")\n .allowOpen(\"false\")\n .serverCertificate(ServerTlsPolicyServerCertificateArgs.builder()\n .grpcEndpoint(ServerTlsPolicyServerCertificateGrpcEndpointArgs.builder()\n .targetUri(\"unix:mypath\")\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ServerTlsPolicy\n properties:\n name: my-server-tls-policy\n labels:\n foo: bar\n description: my description\n location: global\n allowOpen: 'false'\n serverCertificate:\n grpcEndpoint:\n targetUri: unix:mypath\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Network Security Server Tls Policy Mtls\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from 
\"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\nimport * as std from \"@pulumi/std\";\n\nconst project = gcp.organizations.getProject({});\nconst defaultTrustConfig = new gcp.certificatemanager.TrustConfig(\"default\", {\n name: \"my-trust-config\",\n description: \"sample trust config description\",\n location: \"global\",\n trustStores: [{\n trustAnchors: [{\n pemCertificate: std.file({\n input: \"test-fixtures/ca_cert.pem\",\n }).then(invoke =\u003e invoke.result),\n }],\n intermediateCas: [{\n pemCertificate: std.file({\n input: \"test-fixtures/ca_cert.pem\",\n }).then(invoke =\u003e invoke.result),\n }],\n }],\n labels: {\n foo: \"bar\",\n },\n});\nconst _default = new gcp.networksecurity.ServerTlsPolicy(\"default\", {\n name: \"my-server-tls-policy\",\n description: \"my description\",\n location: \"global\",\n allowOpen: false,\n mtlsPolicy: {\n clientValidationMode: \"REJECT_INVALID\",\n clientValidationTrustConfig: pulumi.all([project, defaultTrustConfig.name]).apply(([project, name]) =\u003e `projects/${project.number}/locations/global/trustConfigs/${name}`),\n },\n labels: {\n foo: \"bar\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\nimport pulumi_std as std\n\nproject = gcp.organizations.get_project()\ndefault_trust_config = gcp.certificatemanager.TrustConfig(\"default\",\n name=\"my-trust-config\",\n description=\"sample trust config description\",\n location=\"global\",\n trust_stores=[{\n \"trust_anchors\": [{\n \"pem_certificate\": std.file(input=\"test-fixtures/ca_cert.pem\").result,\n }],\n \"intermediate_cas\": [{\n \"pem_certificate\": std.file(input=\"test-fixtures/ca_cert.pem\").result,\n }],\n }],\n labels={\n \"foo\": \"bar\",\n })\ndefault = gcp.networksecurity.ServerTlsPolicy(\"default\",\n name=\"my-server-tls-policy\",\n description=\"my description\",\n location=\"global\",\n allow_open=False,\n mtls_policy={\n \"client_validation_mode\": \"REJECT_INVALID\",\n \"client_validation_trust_config\": default_trust_config.name.apply(lambda name: f\"projects/{project.number}/locations/global/trustConfigs/{name}\"),\n },\n labels={\n \"foo\": \"bar\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\nusing Std = Pulumi.Std;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var defaultTrustConfig = new Gcp.CertificateManager.TrustConfig(\"default\", new()\n {\n Name = \"my-trust-config\",\n Description = \"sample trust config description\",\n Location = \"global\",\n TrustStores = new[]\n {\n new Gcp.CertificateManager.Inputs.TrustConfigTrustStoreArgs\n {\n TrustAnchors = new[]\n {\n new Gcp.CertificateManager.Inputs.TrustConfigTrustStoreTrustAnchorArgs\n {\n PemCertificate = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/ca_cert.pem\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n },\n IntermediateCas = new[]\n {\n new Gcp.CertificateManager.Inputs.TrustConfigTrustStoreIntermediateCaArgs\n {\n PemCertificate = Std.File.Invoke(new()\n {\n Input = \"test-fixtures/ca_cert.pem\",\n }).Apply(invoke =\u003e invoke.Result),\n },\n },\n },\n },\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n var @default = new Gcp.NetworkSecurity.ServerTlsPolicy(\"default\", new()\n {\n Name = \"my-server-tls-policy\",\n Description = \"my description\",\n Location = \"global\",\n AllowOpen = false,\n MtlsPolicy = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyArgs\n {\n ClientValidationMode = 
\"REJECT_INVALID\",\n ClientValidationTrustConfig = Output.Tuple(project, defaultTrustConfig.Name).Apply(values =\u003e\n {\n var project = values.Item1;\n var name = values.Item2;\n return $\"projects/{project.Apply(getProjectResult =\u003e getProjectResult.Number)}/locations/global/trustConfigs/{name}\";\n }),\n },\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/certificatemanager\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networksecurity\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-std/sdk/go/std\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/ca_cert.pem\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinvokeFile1, err := std.File(ctx, \u0026std.FileArgs{\n\t\t\tInput: \"test-fixtures/ca_cert.pem\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefaultTrustConfig, err := certificatemanager.NewTrustConfig(ctx, \"default\", \u0026certificatemanager.TrustConfigArgs{\n\t\t\tName: pulumi.String(\"my-trust-config\"),\n\t\t\tDescription: pulumi.String(\"sample trust config description\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tTrustStores: certificatemanager.TrustConfigTrustStoreArray{\n\t\t\t\t\u0026certificatemanager.TrustConfigTrustStoreArgs{\n\t\t\t\t\tTrustAnchors: certificatemanager.TrustConfigTrustStoreTrustAnchorArray{\n\t\t\t\t\t\t\u0026certificatemanager.TrustConfigTrustStoreTrustAnchorArgs{\n\t\t\t\t\t\t\tPemCertificate: pulumi.String(invokeFile.Result),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIntermediateCas: certificatemanager.TrustConfigTrustStoreIntermediateCaArray{\n\t\t\t\t\t\t\u0026certificatemanager.TrustConfigTrustStoreIntermediateCaArgs{\n\t\t\t\t\t\t\tPemCertificate: pulumi.String(invokeFile1.Result),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networksecurity.NewServerTlsPolicy(ctx, \"default\", \u0026networksecurity.ServerTlsPolicyArgs{\n\t\t\tName: pulumi.String(\"my-server-tls-policy\"),\n\t\t\tDescription: pulumi.String(\"my description\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tAllowOpen: pulumi.Bool(false),\n\t\t\tMtlsPolicy: \u0026networksecurity.ServerTlsPolicyMtlsPolicyArgs{\n\t\t\t\tClientValidationMode: pulumi.String(\"REJECT_INVALID\"),\n\t\t\t\tClientValidationTrustConfig: defaultTrustConfig.Name.ApplyT(func(name string) (string, error) {\n\t\t\t\t\treturn fmt.Sprintf(\"projects/%v/locations/global/trustConfigs/%v\", project.Number, name), nil\n\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t},\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.certificatemanager.TrustConfig;\nimport 
com.pulumi.gcp.certificatemanager.TrustConfigArgs;\nimport com.pulumi.gcp.certificatemanager.inputs.TrustConfigTrustStoreArgs;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicy;\nimport com.pulumi.gcp.networksecurity.ServerTlsPolicyArgs;\nimport com.pulumi.gcp.networksecurity.inputs.ServerTlsPolicyMtlsPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var project = OrganizationsFunctions.getProject();\n\n var defaultTrustConfig = new TrustConfig(\"defaultTrustConfig\", TrustConfigArgs.builder()\n .name(\"my-trust-config\")\n .description(\"sample trust config description\")\n .location(\"global\")\n .trustStores(TrustConfigTrustStoreArgs.builder()\n .trustAnchors(TrustConfigTrustStoreTrustAnchorArgs.builder()\n .pemCertificate(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/ca_cert.pem\")\n .build()).result())\n .build())\n .intermediateCas(TrustConfigTrustStoreIntermediateCaArgs.builder()\n .pemCertificate(StdFunctions.file(FileArgs.builder()\n .input(\"test-fixtures/ca_cert.pem\")\n .build()).result())\n .build())\n .build())\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n var default_ = new ServerTlsPolicy(\"default\", ServerTlsPolicyArgs.builder()\n .name(\"my-server-tls-policy\")\n .description(\"my description\")\n .location(\"global\")\n .allowOpen(\"false\")\n .mtlsPolicy(ServerTlsPolicyMtlsPolicyArgs.builder()\n .clientValidationMode(\"REJECT_INVALID\")\n .clientValidationTrustConfig(defaultTrustConfig.name().applyValue(name -\u003e String.format(\"projects/%s/locations/global/trustConfigs/%s\", project.applyValue(getProjectResult -\u003e getProjectResult.number()),name)))\n .build())\n .labels(Map.of(\"foo\", \"bar\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:networksecurity:ServerTlsPolicy\n properties:\n name: my-server-tls-policy\n description: my description\n location: global\n allowOpen: 'false'\n mtlsPolicy:\n clientValidationMode: REJECT_INVALID\n clientValidationTrustConfig: projects/${project.number}/locations/global/trustConfigs/${defaultTrustConfig.name}\n labels:\n foo: bar\n defaultTrustConfig:\n type: gcp:certificatemanager:TrustConfig\n name: default\n properties:\n name: my-trust-config\n description: sample trust config description\n location: global\n trustStores:\n - trustAnchors:\n - pemCertificate:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/ca_cert.pem\n Return: result\n intermediateCas:\n - pemCertificate:\n fn::invoke:\n Function: std:file\n Arguments:\n input: test-fixtures/ca_cert.pem\n Return: result\n labels:\n foo: bar\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nServerTlsPolicy can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}`\n\n* `{{project}}/{{location}}/{{name}}`\n\n* `{{location}}/{{name}}`\n\nWhen using the `pulumi import` command, ServerTlsPolicy can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:networksecurity/serverTlsPolicy:ServerTlsPolicy default projects/{{project}}/locations/{{location}}/serverTlsPolicies/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networksecurity/serverTlsPolicy:ServerTlsPolicy default {{project}}/{{location}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:networksecurity/serverTlsPolicy:ServerTlsPolicy default {{location}}/{{name}}\n```\n\n", "properties": { "allowOpen": { "type": "boolean", @@ -227970,7 +229550,7 @@ } }, "gcp:organizations/project:Project": { - "description": "Allows creation and management of a Google Cloud Platform project.\n\nProjects created with this resource must be associated with an Organization.\nSee the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.\n\nThe user or service account that is running this provider when creating a `gcp.organizations.Project`\nresource must have `roles/resourcemanager.projectCreator` on the specified organization. See the\n[Access Control for Organizations Using IAM](https://cloud.google.com/resource-manager/docs/access-control-org)\ndoc for more information.\n\n\u003e This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account.\n\nTo get more information about projects, see:\n\n* [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects)\n* How-to Guides\n * [Creating and managing projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects)\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myProject = new gcp.organizations.Project(\"my_project\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n orgId: \"1234567\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_project = gcp.organizations.Project(\"my_project\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n org_id=\"1234567\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myProject = new Gcp.Organizations.Project(\"my_project\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n OrgId = \"1234567\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.NewProject(ctx, \"my_project\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tOrgId: pulumi.String(\"1234567\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var myProject = new Project(\"myProject\", 
ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .orgId(\"1234567\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myProject:\n type: gcp:organizations:Project\n name: my_project\n properties:\n name: My Project\n projectId: your-project-id\n orgId: '1234567'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nTo create a project under a specific folder\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst department1 = new gcp.organizations.Folder(\"department1\", {\n displayName: \"Department 1\",\n parent: \"organizations/1234567\",\n});\nconst myProject_in_a_folder = new gcp.organizations.Project(\"my_project-in-a-folder\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n folderId: department1.name,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndepartment1 = gcp.organizations.Folder(\"department1\",\n display_name=\"Department 1\",\n parent=\"organizations/1234567\")\nmy_project_in_a_folder = gcp.organizations.Project(\"my_project-in-a-folder\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n folder_id=department1.name)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var department1 = new Gcp.Organizations.Folder(\"department1\", new()\n {\n DisplayName = \"Department 1\",\n Parent = \"organizations/1234567\",\n });\n\n var myProject_in_a_folder = new Gcp.Organizations.Project(\"my_project-in-a-folder\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n FolderId = department1.Name,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdepartment1, err := organizations.NewFolder(ctx, \"department1\", \u0026organizations.FolderArgs{\n\t\t\tDisplayName: pulumi.String(\"Department 1\"),\n\t\t\tParent: pulumi.String(\"organizations/1234567\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = organizations.NewProject(ctx, \"my_project-in-a-folder\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tFolderId: department1.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Folder;\nimport com.pulumi.gcp.organizations.FolderArgs;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var department1 = new Folder(\"department1\", FolderArgs.builder()\n .displayName(\"Department 1\")\n .parent(\"organizations/1234567\")\n .build());\n\n var myProject_in_a_folder = new Project(\"myProject-in-a-folder\", ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .folderId(department1.name())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n 
myProject-in-a-folder:\n type: gcp:organizations:Project\n name: my_project-in-a-folder\n properties:\n name: My Project\n projectId: your-project-id\n folderId: ${department1.name}\n department1:\n type: gcp:organizations:Folder\n properties:\n displayName: Department 1\n parent: organizations/1234567\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nProjects can be imported using the `project_id`, e.g.\n\n* `{{project_id}}`\n\nWhen using the `pulumi import` command, Projects can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:organizations/project:Project default {{project_id}}\n```\n\n", + "description": "Allows creation and management of a Google Cloud Platform project.\n\nProjects created with this resource must be associated with an Organization.\nSee the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.\n\nThe user or service account that is running this provider when creating a `gcp.organizations.Project`\nresource must have `roles/resourcemanager.projectCreator` on the specified organization. See the\n[Access Control for Organizations Using IAM](https://cloud.google.com/resource-manager/docs/access-control-org)\ndoc for more information.\n\n\u003e This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account.\n\n\u003e It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible.\n\n\u003e It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. 
\n\nTo get more information about projects, see:\n\n* [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects)\n* How-to Guides\n * [Creating and managing projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects)\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myProject = new gcp.organizations.Project(\"my_project\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n orgId: \"1234567\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_project = gcp.organizations.Project(\"my_project\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n org_id=\"1234567\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myProject = new Gcp.Organizations.Project(\"my_project\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n OrgId = \"1234567\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.NewProject(ctx, \"my_project\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tOrgId: pulumi.String(\"1234567\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var myProject = new Project(\"myProject\", ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .orgId(\"1234567\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myProject:\n type: gcp:organizations:Project\n name: my_project\n properties:\n name: My Project\n projectId: your-project-id\n orgId: '1234567'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nTo create a project under a specific folder\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst department1 = new gcp.organizations.Folder(\"department1\", {\n displayName: \"Department 1\",\n parent: \"organizations/1234567\",\n});\nconst myProject_in_a_folder = new gcp.organizations.Project(\"my_project-in-a-folder\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n folderId: department1.name,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndepartment1 = gcp.organizations.Folder(\"department1\",\n display_name=\"Department 1\",\n parent=\"organizations/1234567\")\nmy_project_in_a_folder = gcp.organizations.Project(\"my_project-in-a-folder\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n folder_id=department1.name)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn 
await Deployment.RunAsync(() =\u003e \n{\n var department1 = new Gcp.Organizations.Folder(\"department1\", new()\n {\n DisplayName = \"Department 1\",\n Parent = \"organizations/1234567\",\n });\n\n var myProject_in_a_folder = new Gcp.Organizations.Project(\"my_project-in-a-folder\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n FolderId = department1.Name,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdepartment1, err := organizations.NewFolder(ctx, \"department1\", \u0026organizations.FolderArgs{\n\t\t\tDisplayName: pulumi.String(\"Department 1\"),\n\t\t\tParent: pulumi.String(\"organizations/1234567\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = organizations.NewProject(ctx, \"my_project-in-a-folder\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tFolderId: department1.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Folder;\nimport com.pulumi.gcp.organizations.FolderArgs;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var department1 = new Folder(\"department1\", FolderArgs.builder()\n .displayName(\"Department 1\")\n .parent(\"organizations/1234567\")\n .build());\n\n var myProject_in_a_folder = new Project(\"myProject-in-a-folder\", ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .folderId(department1.name())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myProject-in-a-folder:\n type: gcp:organizations:Project\n name: my_project-in-a-folder\n properties:\n name: My Project\n projectId: your-project-id\n folderId: ${department1.name}\n department1:\n type: gcp:organizations:Folder\n properties:\n displayName: Department 1\n parent: organizations/1234567\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nTo create a project with a tag\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myProject = new gcp.organizations.Project(\"my_project\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n orgId: \"1234567\",\n tags: {\n \"1234567/env\": \"staging\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_project = gcp.organizations.Project(\"my_project\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n org_id=\"1234567\",\n tags={\n \"1234567/env\": \"staging\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myProject = new Gcp.Organizations.Project(\"my_project\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n OrgId = \"1234567\",\n Tags = \n {\n { \"1234567/env\", \"staging\" },\n },\n 
});\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.NewProject(ctx, \"my_project\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tOrgId: pulumi.String(\"1234567\"),\n\t\t\tTags: pulumi.StringMap{\n\t\t\t\t\"1234567/env\": pulumi.String(\"staging\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var myProject = new Project(\"myProject\", ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .orgId(\"1234567\")\n .tags(Map.of(\"1234567/env\", \"staging\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myProject:\n type: gcp:organizations:Project\n name: my_project\n properties:\n name: My Project\n projectId: your-project-id\n orgId: '1234567'\n tags:\n 1234567/env: staging\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nProjects can be imported using the `project_id`, e.g.\n\n* `{{project_id}}`\n\nWhen using the `pulumi import` command, Projects can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:organizations/project:Project default {{project_id}}\n```\n\n", "properties": { "autoCreateNetwork": { "type": "boolean", @@ -228025,6 +229605,13 @@ }, "description": "The combination of labels configured directly on the resource and default labels configured on the provider.\n", "secret": true + }, + "tags": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated.\n" } }, "required": [ @@ -228069,6 +229656,14 @@ "type": "string", "description": "The project ID. Changing this forces a new project to be created.\n", "willReplaceOnChanges": true + }, + "tags": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated.\n", + "willReplaceOnChanges": true } }, "stateInputs": { @@ -228128,6 +229723,14 @@ }, "description": "The combination of labels configured directly on the resource and default labels configured on the provider.\n", "secret": true + }, + "tags": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "A map of resource manager tags. 
Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated.\n", + "willReplaceOnChanges": true } }, "type": "object" @@ -229038,7 +230641,7 @@ }, "daosVersion": { "type": "string", - "description": "The version of DAOS software running in the instance\n" + "description": "The version of DAOS software running in the instance.\n" }, "description": { "type": "string", @@ -229058,7 +230661,7 @@ }, "effectiveReservedIpRange": { "type": "string", - "description": "Immutable. Contains the id of the allocated IP address range associated with the\nprivate service access connection for example, \"test-default\" associated\nwith IP range 10.0.0.0/29. This field is populated by the service and\nand contains the value currently used by the service.\n" + "description": "Immutable. Contains the id of the allocated IP address\nrange associated with the private service access connection for example, \\\"test-default\\\"\nassociated with IP range 10.0.0.0/29. This field is populated by the service\nand contains the value currently used by the service.\n" }, "fileStripeLevel": { "type": "string", @@ -229073,7 +230676,7 @@ "additionalProperties": { "type": "string" }, - "description": "Cloud Labels are a flexible and lightweight mechanism for organizing cloud\nresources into groups that reflect a customer's organizational needs and\ndeployment strategies. Cloud Labels can be used to filter collections of\nresources. They can be used to control how resource metrics are aggregated.\nAnd they can be used as arguments to policy management rules (e.g. route,\nfirewall, load balancing, etc.).\n* Label keys must be between 1 and 63 characters long and must conform to\nthe following regular expression: `a-z{0,62}`.\n* Label values must be between 0 and 63 characters long and must conform\nto the regular expression `[a-z0-9_-]{0,63}`.\n* No more than 64 labels can be associated with a given resource.\nSee https://goo.gl/xmQnxf for more information on and examples of labels.\nIf you plan to use labels in your own code, please note that additional\ncharacters may be allowed in the future. Therefore, you are advised to use\nan internal label representation, such as JSON, which doesn't rely upon\nspecific characters being disallowed. For example, representing labels\nas the string: name + \"_\" + value would prove problematic if we were to\nallow \"_\" in a future release.\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.\n" + "description": "Cloud Labels are a flexible and lightweight mechanism for\norganizing cloud resources into groups that reflect a customer's organizational\nneeds and deployment strategies. Cloud Labels can be used to filter collections\nof resources. They can be used to control how resource metrics are aggregated.\nAnd they can be used as arguments to policy management rules (e.g. 
route, firewall,\nload balancing, etc.).\n* Label keys must be between 1 and 63 characters long and must conform to\nthe following regular expression: `a-z{0,62}`.\n* Label values must be between 0 and 63 characters long and must conform\nto the regular expression `[a-z0-9_-]{0,63}`.\n* No more than 64 labels can be associated with a given resource.\nSee https://goo.gl/xmQnxf for more information on and examples of labels.\nIf you plan to use labels in your own code, please note that additional\ncharacters may be allowed in the future. Therefore, you are advised to use\nan internal label representation, such as JSON, which doesn't rely upon\nspecific characters being disallowed. For example, representing labels\nas the string: `name + \"_\" + value` would prove problematic if we were to\nallow `\"_\"` in a future release. \"\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.\n" }, "location": { "type": "string", @@ -229085,7 +230688,7 @@ }, "network": { "type": "string", - "description": "Immutable. The name of the Google Compute Engine\n[VPC network](https://cloud.google.com/vpc/docs/vpc) to which the\ninstance is connected.\n" + "description": "Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc)\nto which the instance is connected.\n" }, "project": { "type": "string", @@ -229101,7 +230704,7 @@ }, "reservedIpRange": { "type": "string", - "description": "Immutable. Contains the id of the allocated IP address range associated with the\nprivate service access connection for example, \"test-default\" associated\nwith IP range 10.0.0.0/29. If no range id is provided all ranges will be\nconsidered.\n" + "description": "Immutable. Contains the id of the allocated IP address range\nassociated with the private service access connection for example, \\\"test-default\\\"\nassociated with IP range 10.0.0.0/29. If no range id is provided all ranges will\nbe considered.\n" }, "state": { "type": "string", @@ -229155,7 +230758,7 @@ "additionalProperties": { "type": "string" }, - "description": "Cloud Labels are a flexible and lightweight mechanism for organizing cloud\nresources into groups that reflect a customer's organizational needs and\ndeployment strategies. Cloud Labels can be used to filter collections of\nresources. They can be used to control how resource metrics are aggregated.\nAnd they can be used as arguments to policy management rules (e.g. route,\nfirewall, load balancing, etc.).\n* Label keys must be between 1 and 63 characters long and must conform to\nthe following regular expression: `a-z{0,62}`.\n* Label values must be between 0 and 63 characters long and must conform\nto the regular expression `[a-z0-9_-]{0,63}`.\n* No more than 64 labels can be associated with a given resource.\nSee https://goo.gl/xmQnxf for more information on and examples of labels.\nIf you plan to use labels in your own code, please note that additional\ncharacters may be allowed in the future. Therefore, you are advised to use\nan internal label representation, such as JSON, which doesn't rely upon\nspecific characters being disallowed. 
For example, representing labels\nas the string: name + \"_\" + value would prove problematic if we were to\nallow \"_\" in a future release.\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.\n" + "description": "Cloud Labels are a flexible and lightweight mechanism for\norganizing cloud resources into groups that reflect a customer's organizational\nneeds and deployment strategies. Cloud Labels can be used to filter collections\nof resources. They can be used to control how resource metrics are aggregated.\nAnd they can be used as arguments to policy management rules (e.g. route, firewall,\nload balancing, etc.).\n* Label keys must be between 1 and 63 characters long and must conform to\nthe following regular expression: `a-z{0,62}`.\n* Label values must be between 0 and 63 characters long and must conform\nto the regular expression `[a-z0-9_-]{0,63}`.\n* No more than 64 labels can be associated with a given resource.\nSee https://goo.gl/xmQnxf for more information on and examples of labels.\nIf you plan to use labels in your own code, please note that additional\ncharacters may be allowed in the future. Therefore, you are advised to use\nan internal label representation, such as JSON, which doesn't rely upon\nspecific characters being disallowed. For example, representing labels\nas the string: `name + \"_\" + value` would prove problematic if we were to\nallow `\"_\"` in a future release. \"\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.\n" }, "location": { "type": "string", @@ -229164,7 +230767,7 @@ }, "network": { "type": "string", - "description": "Immutable. The name of the Google Compute Engine\n[VPC network](https://cloud.google.com/vpc/docs/vpc) to which the\ninstance is connected.\n", + "description": "Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc)\nto which the instance is connected.\n", "willReplaceOnChanges": true }, "project": { @@ -229174,7 +230777,7 @@ }, "reservedIpRange": { "type": "string", - "description": "Immutable. Contains the id of the allocated IP address range associated with the\nprivate service access connection for example, \"test-default\" associated\nwith IP range 10.0.0.0/29. If no range id is provided all ranges will be\nconsidered.\n", + "description": "Immutable. Contains the id of the allocated IP address range\nassociated with the private service access connection for example, \\\"test-default\\\"\nassociated with IP range 10.0.0.0/29. If no range id is provided all ranges will\nbe considered.\n", "willReplaceOnChanges": true } }, @@ -229204,7 +230807,7 @@ }, "daosVersion": { "type": "string", - "description": "The version of DAOS software running in the instance\n" + "description": "The version of DAOS software running in the instance.\n" }, "description": { "type": "string", @@ -229224,7 +230827,7 @@ }, "effectiveReservedIpRange": { "type": "string", - "description": "Immutable. Contains the id of the allocated IP address range associated with the\nprivate service access connection for example, \"test-default\" associated\nwith IP range 10.0.0.0/29. This field is populated by the service and\nand contains the value currently used by the service.\n" + "description": "Immutable. 
Contains the id of the allocated IP address\nrange associated with the private service access connection for example, \\\"test-default\\\"\nassociated with IP range 10.0.0.0/29. This field is populated by the service\nand contains the value currently used by the service.\n" }, "fileStripeLevel": { "type": "string", @@ -229240,7 +230843,7 @@ "additionalProperties": { "type": "string" }, - "description": "Cloud Labels are a flexible and lightweight mechanism for organizing cloud\nresources into groups that reflect a customer's organizational needs and\ndeployment strategies. Cloud Labels can be used to filter collections of\nresources. They can be used to control how resource metrics are aggregated.\nAnd they can be used as arguments to policy management rules (e.g. route,\nfirewall, load balancing, etc.).\n* Label keys must be between 1 and 63 characters long and must conform to\nthe following regular expression: `a-z{0,62}`.\n* Label values must be between 0 and 63 characters long and must conform\nto the regular expression `[a-z0-9_-]{0,63}`.\n* No more than 64 labels can be associated with a given resource.\nSee https://goo.gl/xmQnxf for more information on and examples of labels.\nIf you plan to use labels in your own code, please note that additional\ncharacters may be allowed in the future. Therefore, you are advised to use\nan internal label representation, such as JSON, which doesn't rely upon\nspecific characters being disallowed. For example, representing labels\nas the string: name + \"_\" + value would prove problematic if we were to\nallow \"_\" in a future release.\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.\n" + "description": "Cloud Labels are a flexible and lightweight mechanism for\norganizing cloud resources into groups that reflect a customer's organizational\nneeds and deployment strategies. Cloud Labels can be used to filter collections\nof resources. They can be used to control how resource metrics are aggregated.\nAnd they can be used as arguments to policy management rules (e.g. route, firewall,\nload balancing, etc.).\n* Label keys must be between 1 and 63 characters long and must conform to\nthe following regular expression: `a-z{0,62}`.\n* Label values must be between 0 and 63 characters long and must conform\nto the regular expression `[a-z0-9_-]{0,63}`.\n* No more than 64 labels can be associated with a given resource.\nSee https://goo.gl/xmQnxf for more information on and examples of labels.\nIf you plan to use labels in your own code, please note that additional\ncharacters may be allowed in the future. Therefore, you are advised to use\nan internal label representation, such as JSON, which doesn't rely upon\nspecific characters being disallowed. For example, representing labels\nas the string: `name + \"_\" + value` would prove problematic if we were to\nallow `\"_\"` in a future release. \"\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.\n" }, "location": { "type": "string", @@ -229253,7 +230856,7 @@ }, "network": { "type": "string", - "description": "Immutable. The name of the Google Compute Engine\n[VPC network](https://cloud.google.com/vpc/docs/vpc) to which the\ninstance is connected.\n", + "description": "Immutable. 
The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc)\nto which the instance is connected.\n", "willReplaceOnChanges": true }, "project": { @@ -229271,7 +230874,7 @@ }, "reservedIpRange": { "type": "string", - "description": "Immutable. Contains the id of the allocated IP address range associated with the\nprivate service access connection for example, \"test-default\" associated\nwith IP range 10.0.0.0/29. If no range id is provided all ranges will be\nconsidered.\n", + "description": "Immutable. Contains the id of the allocated IP address range\nassociated with the private service access connection for example, \\\"test-default\\\"\nassociated with IP range 10.0.0.0/29. If no range id is provided all ranges will\nbe considered.\n", "willReplaceOnChanges": true }, "state": { @@ -230239,7 +231842,7 @@ } }, "gcp:projects/iamMemberRemove:IamMemberRemove": { - "description": "Ensures that a member:role pairing does not exist in a project's IAM policy. \n\nOn create, this resource will modify the policy to remove the `member` from the\n`role`. If the membership is ever re-added, the next refresh will clear this\nresource from state, proposing re-adding it to correct the membership. Import is\nnot supported- this resource will acquire the current policy and modify it as\npart of creating the resource.\n\nThis resource will conflict with `gcp.projects.IAMPolicy` and\n`gcp.projects.IAMBinding` resources that share a role, as well as\n`gcp.projects.IAMMember` resources that target the same membership. When\nmultiple resources conflict the final state is not guaranteed to include or omit\nthe membership. Subsequent `pulumi up` calls will always show a diff\nuntil the configuration is corrected.\n\nFor more information see\n[the official documentation](https://cloud.google.com/iam/docs/granting-changing-revoking-access)\nand\n[API reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy).\n\n", + "description": "Ensures that a member:role pairing does not exist in a project's IAM policy. \n\nOn create, this resource will modify the policy to remove the `member` from the\n`role`. If the membership is ever re-added, the next refresh will clear this\nresource from state, proposing re-adding it to correct the membership. Import is\nnot supported- this resource will acquire the current policy and modify it as\npart of creating the resource.\n\nThis resource will conflict with `gcp.projects.IAMPolicy` and\n`gcp.projects.IAMBinding` resources that share a role, as well as\n`gcp.projects.IAMMember` resources that target the same membership. When\nmultiple resources conflict the final state is not guaranteed to include or omit\nthe membership. 
Subsequent `pulumi up` calls will always show a diff\nuntil the configuration is corrected.\n\nFor more information see\n[the official documentation](https://cloud.google.com/iam/docs/granting-changing-revoking-access)\nand\n[API reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy).\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst targetProject = gcp.organizations.getProject({});\nconst foo = new gcp.projects.IamMemberRemove(\"foo\", {\n role: \"roles/editor\",\n project: targetProjectGoogleProject.projectId,\n member: `serviceAccount:${targetProjectGoogleProject.number}-compute@developer.gserviceaccount.com`,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ntarget_project = gcp.organizations.get_project()\nfoo = gcp.projects.IamMemberRemove(\"foo\",\n role=\"roles/editor\",\n project=target_project_google_project[\"projectId\"],\n member=f\"serviceAccount:{target_project_google_project['number']}-compute@developer.gserviceaccount.com\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var targetProject = Gcp.Organizations.GetProject.Invoke();\n\n var foo = new Gcp.Projects.IamMemberRemove(\"foo\", new()\n {\n Role = \"roles/editor\",\n Project = targetProjectGoogleProject.ProjectId,\n Member = $\"serviceAccount:{targetProjectGoogleProject.Number}-compute@developer.gserviceaccount.com\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = projects.NewIamMemberRemove(ctx, \"foo\", \u0026projects.IamMemberRemoveArgs{\n\t\t\tRole: pulumi.String(\"roles/editor\"),\n\t\t\tProject: pulumi.Any(targetProjectGoogleProject.ProjectId),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:%v-compute@developer.gserviceaccount.com\", targetProjectGoogleProject.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IamMemberRemove;\nimport com.pulumi.gcp.projects.IamMemberRemoveArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var targetProject = OrganizationsFunctions.getProject();\n\n var foo = new IamMemberRemove(\"foo\", IamMemberRemoveArgs.builder()\n .role(\"roles/editor\")\n .project(targetProjectGoogleProject.projectId())\n .member(String.format(\"serviceAccount:%s-compute@developer.gserviceaccount.com\", targetProjectGoogleProject.number()))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n foo:\n type: gcp:projects:IamMemberRemove\n properties:\n role: roles/editor\n project: 
${targetProjectGoogleProject.projectId}\n member: serviceAccount:${targetProjectGoogleProject.number}-compute@developer.gserviceaccount.com\nvariables:\n targetProject:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n", "properties": { "member": { "type": "string", @@ -230582,7 +232185,7 @@ } }, "gcp:projects/usageExportBucket:UsageExportBucket": { - "description": "Allows creation and management of a Google Cloud Platform project.\n\nProjects created with this resource must be associated with an Organization.\nSee the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.\n\nThe user or service account that is running this provider when creating a `gcp.organizations.Project`\nresource must have `roles/resourcemanager.projectCreator` on the specified organization. See the\n[Access Control for Organizations Using IAM](https://cloud.google.com/resource-manager/docs/access-control-org)\ndoc for more information.\n\n\u003e This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account.\n\nTo get more information about projects, see:\n\n* [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects)\n* How-to Guides\n * [Creating and managing projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects)\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myProject = new gcp.organizations.Project(\"my_project\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n orgId: \"1234567\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_project = gcp.organizations.Project(\"my_project\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n org_id=\"1234567\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myProject = new Gcp.Organizations.Project(\"my_project\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n OrgId = \"1234567\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.NewProject(ctx, \"my_project\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tOrgId: pulumi.String(\"1234567\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var myProject = new Project(\"myProject\", ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .orgId(\"1234567\")\n .build());\n\n 
}\n}\n```\n```yaml\nresources:\n myProject:\n type: gcp:organizations:Project\n name: my_project\n properties:\n name: My Project\n projectId: your-project-id\n orgId: '1234567'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nTo create a project under a specific folder\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst department1 = new gcp.organizations.Folder(\"department1\", {\n displayName: \"Department 1\",\n parent: \"organizations/1234567\",\n});\nconst myProject_in_a_folder = new gcp.organizations.Project(\"my_project-in-a-folder\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n folderId: department1.name,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndepartment1 = gcp.organizations.Folder(\"department1\",\n display_name=\"Department 1\",\n parent=\"organizations/1234567\")\nmy_project_in_a_folder = gcp.organizations.Project(\"my_project-in-a-folder\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n folder_id=department1.name)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var department1 = new Gcp.Organizations.Folder(\"department1\", new()\n {\n DisplayName = \"Department 1\",\n Parent = \"organizations/1234567\",\n });\n\n var myProject_in_a_folder = new Gcp.Organizations.Project(\"my_project-in-a-folder\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n FolderId = department1.Name,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdepartment1, err := organizations.NewFolder(ctx, \"department1\", \u0026organizations.FolderArgs{\n\t\t\tDisplayName: pulumi.String(\"Department 1\"),\n\t\t\tParent: pulumi.String(\"organizations/1234567\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = organizations.NewProject(ctx, \"my_project-in-a-folder\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tFolderId: department1.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Folder;\nimport com.pulumi.gcp.organizations.FolderArgs;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var department1 = new Folder(\"department1\", FolderArgs.builder()\n .displayName(\"Department 1\")\n .parent(\"organizations/1234567\")\n .build());\n\n var myProject_in_a_folder = new Project(\"myProject-in-a-folder\", ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .folderId(department1.name())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myProject-in-a-folder:\n type: gcp:organizations:Project\n name: my_project-in-a-folder\n properties:\n name: My Project\n projectId: 
your-project-id\n folderId: ${department1.name}\n department1:\n type: gcp:organizations:Folder\n properties:\n displayName: Department 1\n parent: organizations/1234567\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nProjects can be imported using the `project_id`, e.g.\n\n* `{{project_id}}`\n\nWhen using the `pulumi import` command, Projects can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:projects/usageExportBucket:UsageExportBucket default {{project_id}}\n```\n\n", + "description": "Allows creation and management of a Google Cloud Platform project.\n\nProjects created with this resource must be associated with an Organization.\nSee the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.\n\nThe user or service account that is running this provider when creating a `gcp.organizations.Project`\nresource must have `roles/resourcemanager.projectCreator` on the specified organization. See the\n[Access Control for Organizations Using IAM](https://cloud.google.com/resource-manager/docs/access-control-org)\ndoc for more information.\n\n\u003e This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account.\n\n\u003e It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible.\n\n\u003e It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. \n\nTo get more information about projects, see:\n\n* [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects)\n* How-to Guides\n * [Creating and managing projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects)\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myProject = new gcp.organizations.Project(\"my_project\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n orgId: \"1234567\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_project = gcp.organizations.Project(\"my_project\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n org_id=\"1234567\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myProject = new Gcp.Organizations.Project(\"my_project\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n OrgId = \"1234567\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.NewProject(ctx, \"my_project\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tOrgId: pulumi.String(\"1234567\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Project;\nimport 
com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var myProject = new Project(\"myProject\", ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .orgId(\"1234567\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myProject:\n type: gcp:organizations:Project\n name: my_project\n properties:\n name: My Project\n projectId: your-project-id\n orgId: '1234567'\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nTo create a project under a specific folder\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst department1 = new gcp.organizations.Folder(\"department1\", {\n displayName: \"Department 1\",\n parent: \"organizations/1234567\",\n});\nconst myProject_in_a_folder = new gcp.organizations.Project(\"my_project-in-a-folder\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n folderId: department1.name,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndepartment1 = gcp.organizations.Folder(\"department1\",\n display_name=\"Department 1\",\n parent=\"organizations/1234567\")\nmy_project_in_a_folder = gcp.organizations.Project(\"my_project-in-a-folder\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n folder_id=department1.name)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var department1 = new Gcp.Organizations.Folder(\"department1\", new()\n {\n DisplayName = \"Department 1\",\n Parent = \"organizations/1234567\",\n });\n\n var myProject_in_a_folder = new Gcp.Organizations.Project(\"my_project-in-a-folder\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n FolderId = department1.Name,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tdepartment1, err := organizations.NewFolder(ctx, \"department1\", \u0026organizations.FolderArgs{\n\t\t\tDisplayName: pulumi.String(\"Department 1\"),\n\t\t\tParent: pulumi.String(\"organizations/1234567\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = organizations.NewProject(ctx, \"my_project-in-a-folder\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tFolderId: department1.Name,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Folder;\nimport com.pulumi.gcp.organizations.FolderArgs;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var department1 
= new Folder(\"department1\", FolderArgs.builder()\n .displayName(\"Department 1\")\n .parent(\"organizations/1234567\")\n .build());\n\n var myProject_in_a_folder = new Project(\"myProject-in-a-folder\", ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .folderId(department1.name())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myProject-in-a-folder:\n type: gcp:organizations:Project\n name: my_project-in-a-folder\n properties:\n name: My Project\n projectId: your-project-id\n folderId: ${department1.name}\n department1:\n type: gcp:organizations:Folder\n properties:\n displayName: Department 1\n parent: organizations/1234567\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\nTo create a project with a tag\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myProject = new gcp.organizations.Project(\"my_project\", {\n name: \"My Project\",\n projectId: \"your-project-id\",\n orgId: \"1234567\",\n tags: {\n \"1234567/env\": \"staging\",\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_project = gcp.organizations.Project(\"my_project\",\n name=\"My Project\",\n project_id=\"your-project-id\",\n org_id=\"1234567\",\n tags={\n \"1234567/env\": \"staging\",\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myProject = new Gcp.Organizations.Project(\"my_project\", new()\n {\n Name = \"My Project\",\n ProjectId = \"your-project-id\",\n OrgId = \"1234567\",\n Tags = \n {\n { \"1234567/env\", \"staging\" },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := organizations.NewProject(ctx, \"my_project\", \u0026organizations.ProjectArgs{\n\t\t\tName: pulumi.String(\"My Project\"),\n\t\t\tProjectId: pulumi.String(\"your-project-id\"),\n\t\t\tOrgId: pulumi.String(\"1234567\"),\n\t\t\tTags: pulumi.StringMap{\n\t\t\t\t\"1234567/env\": pulumi.String(\"staging\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Project;\nimport com.pulumi.gcp.organizations.ProjectArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var myProject = new Project(\"myProject\", ProjectArgs.builder()\n .name(\"My Project\")\n .projectId(\"your-project-id\")\n .orgId(\"1234567\")\n .tags(Map.of(\"1234567/env\", \"staging\"))\n .build());\n\n }\n}\n```\n```yaml\nresources:\n myProject:\n type: gcp:organizations:Project\n name: my_project\n properties:\n name: My Project\n projectId: your-project-id\n orgId: '1234567'\n tags:\n 1234567/env: staging\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nProjects can be imported using the `project_id`, e.g.\n\n* `{{project_id}}`\n\nWhen using the `pulumi import` command, Projects can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:projects/usageExportBucket:UsageExportBucket default {{project_id}}\n```\n\n", "properties": { "bucketName": { "type": "string", @@ -231268,7 +232871,7 @@ } }, "gcp:pubsub/subscription:Subscription": { - "description": "A named resource representing the stream of messages from a single,\nspecific topic, to be delivered to the subscribing application.\n\n\nTo get more information about Subscription, see:\n\n* [API documentation](https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions)\n* How-to Guides\n * [Managing Subscriptions](https://cloud.google.com/pubsub/docs/admin#managing_subscriptions)\n\n\u003e **Note:** You can retrieve the email of the Google Managed Pub/Sub Service Account used for forwarding\nby using the `gcp.projects.ServiceIdentity` resource.\n\n## Example Usage\n\n### Pubsub Subscription Push\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n ackDeadlineSeconds: 20,\n labels: {\n foo: \"bar\",\n },\n pushConfig: {\n pushEndpoint: \"https://example.com/push\",\n attributes: {\n \"x-goog-version\": \"v1\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n ack_deadline_seconds=20,\n labels={\n \"foo\": \"bar\",\n },\n push_config={\n \"push_endpoint\": \"https://example.com/push\",\n \"attributes\": {\n \"x_goog_version\": \"v1\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n AckDeadlineSeconds = 20,\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n PushConfig = new Gcp.PubSub.Inputs.SubscriptionPushConfigArgs\n {\n PushEndpoint = \"https://example.com/push\",\n Attributes = \n {\n { \"x-goog-version\", \"v1\" },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tAckDeadlineSeconds: pulumi.Int(20),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tPushConfig: \u0026pubsub.SubscriptionPushConfigArgs{\n\t\t\t\tPushEndpoint: pulumi.String(\"https://example.com/push\"),\n\t\t\t\tAttributes: pulumi.StringMap{\n\t\t\t\t\t\"x-goog-version\": pulumi.String(\"v1\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage 
generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionPushConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .ackDeadlineSeconds(20)\n .labels(Map.of(\"foo\", \"bar\"))\n .pushConfig(SubscriptionPushConfigArgs.builder()\n .pushEndpoint(\"https://example.com/push\")\n .attributes(Map.of(\"x-goog-version\", \"v1\"))\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n ackDeadlineSeconds: 20\n labels:\n foo: bar\n pushConfig:\n pushEndpoint: https://example.com/push\n attributes:\n x-goog-version: v1\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Pull\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n labels: {\n foo: \"bar\",\n },\n messageRetentionDuration: \"1200s\",\n retainAckedMessages: true,\n ackDeadlineSeconds: 20,\n expirationPolicy: {\n ttl: \"300000.5s\",\n },\n retryPolicy: {\n minimumBackoff: \"10s\",\n },\n enableMessageOrdering: false,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n labels={\n \"foo\": \"bar\",\n },\n message_retention_duration=\"1200s\",\n retain_acked_messages=True,\n ack_deadline_seconds=20,\n expiration_policy={\n \"ttl\": \"300000.5s\",\n },\n retry_policy={\n \"minimum_backoff\": \"10s\",\n },\n enable_message_ordering=False)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n MessageRetentionDuration = \"1200s\",\n RetainAckedMessages = true,\n AckDeadlineSeconds = 20,\n ExpirationPolicy = new Gcp.PubSub.Inputs.SubscriptionExpirationPolicyArgs\n {\n Ttl = \"300000.5s\",\n },\n RetryPolicy = new Gcp.PubSub.Inputs.SubscriptionRetryPolicyArgs\n {\n MinimumBackoff = \"10s\",\n },\n EnableMessageOrdering = false,\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tMessageRetentionDuration: pulumi.String(\"1200s\"),\n\t\t\tRetainAckedMessages: pulumi.Bool(true),\n\t\t\tAckDeadlineSeconds: pulumi.Int(20),\n\t\t\tExpirationPolicy: \u0026pubsub.SubscriptionExpirationPolicyArgs{\n\t\t\t\tTtl: pulumi.String(\"300000.5s\"),\n\t\t\t},\n\t\t\tRetryPolicy: \u0026pubsub.SubscriptionRetryPolicyArgs{\n\t\t\t\tMinimumBackoff: pulumi.String(\"10s\"),\n\t\t\t},\n\t\t\tEnableMessageOrdering: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionExpirationPolicyArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionRetryPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .labels(Map.of(\"foo\", \"bar\"))\n .messageRetentionDuration(\"1200s\")\n .retainAckedMessages(true)\n .ackDeadlineSeconds(20)\n .expirationPolicy(SubscriptionExpirationPolicyArgs.builder()\n .ttl(\"300000.5s\")\n .build())\n .retryPolicy(SubscriptionRetryPolicyArgs.builder()\n .minimumBackoff(\"10s\")\n .build())\n .enableMessageOrdering(false)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n labels:\n foo: bar\n messageRetentionDuration: 1200s\n retainAckedMessages: true\n ackDeadlineSeconds: 20\n expirationPolicy:\n ttl: 300000.5s\n retryPolicy:\n minimumBackoff: 10s\n enableMessageOrdering: false\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Dead Letter\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst exampleDeadLetter = new gcp.pubsub.Topic(\"example_dead_letter\", {name: \"example-topic-dead-letter\"});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n deadLetterPolicy: {\n deadLetterTopic: 
exampleDeadLetter.id,\n maxDeliveryAttempts: 10,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nexample_dead_letter = gcp.pubsub.Topic(\"example_dead_letter\", name=\"example-topic-dead-letter\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n dead_letter_policy={\n \"dead_letter_topic\": example_dead_letter.id,\n \"max_delivery_attempts\": 10,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var exampleDeadLetter = new Gcp.PubSub.Topic(\"example_dead_letter\", new()\n {\n Name = \"example-topic-dead-letter\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n DeadLetterPolicy = new Gcp.PubSub.Inputs.SubscriptionDeadLetterPolicyArgs\n {\n DeadLetterTopic = exampleDeadLetter.Id,\n MaxDeliveryAttempts = 10,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texampleDeadLetter, err := pubsub.NewTopic(ctx, \"example_dead_letter\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic-dead-letter\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tDeadLetterPolicy: \u0026pubsub.SubscriptionDeadLetterPolicyArgs{\n\t\t\t\tDeadLetterTopic: exampleDeadLetter.ID(),\n\t\t\t\tMaxDeliveryAttempts: pulumi.Int(10),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionDeadLetterPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var exampleDeadLetter = new Topic(\"exampleDeadLetter\", TopicArgs.builder()\n .name(\"example-topic-dead-letter\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .deadLetterPolicy(SubscriptionDeadLetterPolicyArgs.builder()\n .deadLetterTopic(exampleDeadLetter.id())\n .maxDeliveryAttempts(10)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n 
exampleDeadLetter:\n type: gcp:pubsub:Topic\n name: example_dead_letter\n properties:\n name: example-topic-dead-letter\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n deadLetterPolicy:\n deadLetterTopic: ${exampleDeadLetter.id}\n maxDeliveryAttempts: 10\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Bq\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst project = gcp.organizations.getProject({});\nconst viewer = new gcp.projects.IAMMember(\"viewer\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.metadataViewer\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst editor = new gcp.projects.IAMMember(\"editor\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.dataEditor\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst test = new gcp.bigquery.Dataset(\"test\", {datasetId: \"example_dataset\"});\nconst testTable = new gcp.bigquery.Table(\"test\", {\n deletionProtection: false,\n tableId: \"example_table\",\n datasetId: test.datasetId,\n schema: `[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`,\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n bigqueryConfig: {\n table: pulumi.interpolate`${testTable.project}.${testTable.datasetId}.${testTable.tableId}`,\n },\n}, {\n dependsOn: [\n viewer,\n editor,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nproject = gcp.organizations.get_project()\nviewer = gcp.projects.IAMMember(\"viewer\",\n project=project.project_id,\n role=\"roles/bigquery.metadataViewer\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\neditor = gcp.projects.IAMMember(\"editor\",\n project=project.project_id,\n role=\"roles/bigquery.dataEditor\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\ntest = gcp.bigquery.Dataset(\"test\", dataset_id=\"example_dataset\")\ntest_table = gcp.bigquery.Table(\"test\",\n deletion_protection=False,\n table_id=\"example_table\",\n dataset_id=test.dataset_id,\n schema=\"\"\"[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n\"\"\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n bigquery_config={\n \"table\": pulumi.Output.all(\n project=test_table.project,\n dataset_id=test_table.dataset_id,\n table_id=test_table.table_id\n).apply(lambda resolved_outputs: f\"{resolved_outputs['project']}.{resolved_outputs['dataset_id']}.{resolved_outputs['table_id']}\")\n,\n },\n opts = pulumi.ResourceOptions(depends_on=[\n viewer,\n editor,\n ]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", 
new()\n {\n Name = \"example-topic\",\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var viewer = new Gcp.Projects.IAMMember(\"viewer\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.metadataViewer\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var editor = new Gcp.Projects.IAMMember(\"editor\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.dataEditor\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var test = new Gcp.BigQuery.Dataset(\"test\", new()\n {\n DatasetId = \"example_dataset\",\n });\n\n var testTable = new Gcp.BigQuery.Table(\"test\", new()\n {\n DeletionProtection = false,\n TableId = \"example_table\",\n DatasetId = test.DatasetId,\n Schema = @\"[\n {\n \"\"name\"\": \"\"data\"\",\n \"\"type\"\": \"\"STRING\"\",\n \"\"mode\"\": \"\"NULLABLE\"\",\n \"\"description\"\": \"\"The data\"\"\n }\n]\n\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n BigqueryConfig = new Gcp.PubSub.Inputs.SubscriptionBigqueryConfigArgs\n {\n Table = Output.Tuple(testTable.Project, testTable.DatasetId, testTable.TableId).Apply(values =\u003e\n {\n var project = values.Item1;\n var datasetId = values.Item2;\n var tableId = values.Item3;\n return $\"{project}.{datasetId}.{tableId}\";\n }),\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n viewer,\n editor,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tviewer, err := projects.NewIAMMember(ctx, \"viewer\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.metadataViewer\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\teditor, err := projects.NewIAMMember(ctx, \"editor\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.dataEditor\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttest, err := bigquery.NewDataset(ctx, \"test\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"example_dataset\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestTable, err := bigquery.NewTable(ctx, \"test\", \u0026bigquery.TableArgs{\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTableId: 
pulumi.String(\"example_table\"),\n\t\t\tDatasetId: test.DatasetId,\n\t\t\tSchema: pulumi.String(`[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tBigqueryConfig: \u0026pubsub.SubscriptionBigqueryConfigArgs{\n\t\t\t\tTable: pulumi.All(testTable.Project, testTable.DatasetId, testTable.TableId).ApplyT(func(_args []interface{}) (string, error) {\n\t\t\t\t\tproject := _args[0].(string)\n\t\t\t\t\tdatasetId := _args[1].(string)\n\t\t\t\t\ttableId := _args[2].(string)\n\t\t\t\t\treturn fmt.Sprintf(\"%v.%v.%v\", project, datasetId, tableId), nil\n\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tviewer,\n\t\t\teditor,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.Table;\nimport com.pulumi.gcp.bigquery.TableArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionBigqueryConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var viewer = new IAMMember(\"viewer\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.metadataViewer\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var editor = new IAMMember(\"editor\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.dataEditor\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var test = new Dataset(\"test\", DatasetArgs.builder()\n .datasetId(\"example_dataset\")\n .build());\n\n var testTable = new Table(\"testTable\", TableArgs.builder()\n .deletionProtection(false)\n .tableId(\"example_table\")\n .datasetId(test.datasetId())\n .schema(\"\"\"\n[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n \"\"\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", 
SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .bigqueryConfig(SubscriptionBigqueryConfigArgs.builder()\n .table(Output.tuple(testTable.project(), testTable.datasetId(), testTable.tableId()).applyValue(values -\u003e {\n var project = values.t1;\n var datasetId = values.t2;\n var tableId = values.t3;\n return String.format(\"%s.%s.%s\", project.applyValue(getProjectResult -\u003e getProjectResult),datasetId,tableId);\n }))\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n viewer,\n editor)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n bigqueryConfig:\n table: ${testTable.project}.${testTable.datasetId}.${testTable.tableId}\n options:\n dependson:\n - ${viewer}\n - ${editor}\n viewer:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.metadataViewer\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\n editor:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.dataEditor\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\n test:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: example_dataset\n testTable:\n type: gcp:bigquery:Table\n name: test\n properties:\n deletionProtection: false\n tableId: example_table\n datasetId: ${test.datasetId}\n schema: |\n [\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n ]\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Bq Table Schema\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst project = gcp.organizations.getProject({});\nconst viewer = new gcp.projects.IAMMember(\"viewer\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.metadataViewer\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst editor = new gcp.projects.IAMMember(\"editor\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.dataEditor\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst test = new gcp.bigquery.Dataset(\"test\", {datasetId: \"example_dataset\"});\nconst testTable = new gcp.bigquery.Table(\"test\", {\n deletionProtection: false,\n tableId: \"example_table\",\n datasetId: test.datasetId,\n schema: `[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`,\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n bigqueryConfig: {\n table: pulumi.interpolate`${testTable.project}.${testTable.datasetId}.${testTable.tableId}`,\n useTableSchema: true,\n },\n}, {\n dependsOn: [\n viewer,\n editor,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = 
gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nproject = gcp.organizations.get_project()\nviewer = gcp.projects.IAMMember(\"viewer\",\n project=project.project_id,\n role=\"roles/bigquery.metadataViewer\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\neditor = gcp.projects.IAMMember(\"editor\",\n project=project.project_id,\n role=\"roles/bigquery.dataEditor\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\ntest = gcp.bigquery.Dataset(\"test\", dataset_id=\"example_dataset\")\ntest_table = gcp.bigquery.Table(\"test\",\n deletion_protection=False,\n table_id=\"example_table\",\n dataset_id=test.dataset_id,\n schema=\"\"\"[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n\"\"\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n bigquery_config={\n \"table\": pulumi.Output.all(\n project=test_table.project,\n dataset_id=test_table.dataset_id,\n table_id=test_table.table_id\n).apply(lambda resolved_outputs: f\"{resolved_outputs['project']}.{resolved_outputs['dataset_id']}.{resolved_outputs['table_id']}\")\n,\n \"use_table_schema\": True,\n },\n opts = pulumi.ResourceOptions(depends_on=[\n viewer,\n editor,\n ]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var viewer = new Gcp.Projects.IAMMember(\"viewer\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.metadataViewer\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var editor = new Gcp.Projects.IAMMember(\"editor\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.dataEditor\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var test = new Gcp.BigQuery.Dataset(\"test\", new()\n {\n DatasetId = \"example_dataset\",\n });\n\n var testTable = new Gcp.BigQuery.Table(\"test\", new()\n {\n DeletionProtection = false,\n TableId = \"example_table\",\n DatasetId = test.DatasetId,\n Schema = @\"[\n {\n \"\"name\"\": \"\"data\"\",\n \"\"type\"\": \"\"STRING\"\",\n \"\"mode\"\": \"\"NULLABLE\"\",\n \"\"description\"\": \"\"The data\"\"\n }\n]\n\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n BigqueryConfig = new Gcp.PubSub.Inputs.SubscriptionBigqueryConfigArgs\n {\n Table = Output.Tuple(testTable.Project, testTable.DatasetId, testTable.TableId).Apply(values =\u003e\n {\n var project = values.Item1;\n var datasetId = values.Item2;\n var tableId = values.Item3;\n return $\"{project}.{datasetId}.{tableId}\";\n }),\n UseTableSchema = true,\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n viewer,\n editor,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tviewer, err := projects.NewIAMMember(ctx, \"viewer\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.metadataViewer\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\teditor, err := projects.NewIAMMember(ctx, \"editor\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.dataEditor\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttest, err := bigquery.NewDataset(ctx, \"test\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"example_dataset\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestTable, err := bigquery.NewTable(ctx, \"test\", \u0026bigquery.TableArgs{\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTableId: pulumi.String(\"example_table\"),\n\t\t\tDatasetId: test.DatasetId,\n\t\t\tSchema: pulumi.String(`[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tBigqueryConfig: \u0026pubsub.SubscriptionBigqueryConfigArgs{\n\t\t\t\tTable: pulumi.All(testTable.Project, testTable.DatasetId, testTable.TableId).ApplyT(func(_args []interface{}) (string, error) {\n\t\t\t\t\tproject := _args[0].(string)\n\t\t\t\t\tdatasetId := _args[1].(string)\n\t\t\t\t\ttableId := _args[2].(string)\n\t\t\t\t\treturn fmt.Sprintf(\"%v.%v.%v\", project, datasetId, tableId), nil\n\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\tUseTableSchema: pulumi.Bool(true),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tviewer,\n\t\t\teditor,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.Table;\nimport com.pulumi.gcp.bigquery.TableArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport 
com.pulumi.gcp.pubsub.inputs.SubscriptionBigqueryConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var viewer = new IAMMember(\"viewer\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.metadataViewer\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var editor = new IAMMember(\"editor\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.dataEditor\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var test = new Dataset(\"test\", DatasetArgs.builder()\n .datasetId(\"example_dataset\")\n .build());\n\n var testTable = new Table(\"testTable\", TableArgs.builder()\n .deletionProtection(false)\n .tableId(\"example_table\")\n .datasetId(test.datasetId())\n .schema(\"\"\"\n[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n \"\"\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .bigqueryConfig(SubscriptionBigqueryConfigArgs.builder()\n .table(Output.tuple(testTable.project(), testTable.datasetId(), testTable.tableId()).applyValue(values -\u003e {\n var project = values.t1;\n var datasetId = values.t2;\n var tableId = values.t3;\n return String.format(\"%s.%s.%s\", project, datasetId, tableId);\n }))\n .useTableSchema(true)\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n viewer,\n editor)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n bigqueryConfig:\n table: ${testTable.project}.${testTable.datasetId}.${testTable.tableId}\n useTableSchema: true\n options:\n dependson:\n - ${viewer}\n - ${editor}\n viewer:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.metadataViewer\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\n editor:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.dataEditor\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\n test:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: example_dataset\n testTable:\n type: gcp:bigquery:Table\n name: test\n properties:\n deletionProtection: false\n tableId: example_table\n datasetId: ${test.datasetId}\n schema: |\n [\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": 
\"NULLABLE\",\n \"description\": \"The data\"\n }\n ]\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Bq Service Account\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst bqWriteServiceAccount = new gcp.serviceaccount.Account(\"bq_write_service_account\", {\n accountId: \"example-bqw\",\n displayName: \"BQ Write Service Account\",\n});\nconst project = gcp.organizations.getProject({});\nconst viewer = new gcp.projects.IAMMember(\"viewer\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.metadataViewer\",\n member: pulumi.interpolate`serviceAccount:${bqWriteServiceAccount.email}`,\n});\nconst editor = new gcp.projects.IAMMember(\"editor\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.dataEditor\",\n member: pulumi.interpolate`serviceAccount:${bqWriteServiceAccount.email}`,\n});\nconst test = new gcp.bigquery.Dataset(\"test\", {datasetId: \"example_dataset\"});\nconst testTable = new gcp.bigquery.Table(\"test\", {\n deletionProtection: false,\n tableId: \"example_table\",\n datasetId: test.datasetId,\n schema: `[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`,\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n bigqueryConfig: {\n table: pulumi.interpolate`${testTable.project}.${testTable.datasetId}.${testTable.tableId}`,\n serviceAccountEmail: bqWriteServiceAccount.email,\n },\n}, {\n dependsOn: [\n bqWriteServiceAccount,\n viewer,\n editor,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nbq_write_service_account = gcp.serviceaccount.Account(\"bq_write_service_account\",\n account_id=\"example-bqw\",\n display_name=\"BQ Write Service Account\")\nproject = gcp.organizations.get_project()\nviewer = gcp.projects.IAMMember(\"viewer\",\n project=project.project_id,\n role=\"roles/bigquery.metadataViewer\",\n member=bq_write_service_account.email.apply(lambda email: f\"serviceAccount:{email}\"))\neditor = gcp.projects.IAMMember(\"editor\",\n project=project.project_id,\n role=\"roles/bigquery.dataEditor\",\n member=bq_write_service_account.email.apply(lambda email: f\"serviceAccount:{email}\"))\ntest = gcp.bigquery.Dataset(\"test\", dataset_id=\"example_dataset\")\ntest_table = gcp.bigquery.Table(\"test\",\n deletion_protection=False,\n table_id=\"example_table\",\n dataset_id=test.dataset_id,\n schema=\"\"\"[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n\"\"\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n bigquery_config={\n \"table\": pulumi.Output.all(\n project=test_table.project,\n dataset_id=test_table.dataset_id,\n table_id=test_table.table_id\n).apply(lambda resolved_outputs: f\"{resolved_outputs['project']}.{resolved_outputs['dataset_id']}.{resolved_outputs['table_id']}\")\n,\n \"service_account_email\": bq_write_service_account.email,\n },\n opts = pulumi.ResourceOptions(depends_on=[\n bq_write_service_account,\n viewer,\n editor,\n 
]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var bqWriteServiceAccount = new Gcp.ServiceAccount.Account(\"bq_write_service_account\", new()\n {\n AccountId = \"example-bqw\",\n DisplayName = \"BQ Write Service Account\",\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var viewer = new Gcp.Projects.IAMMember(\"viewer\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.metadataViewer\",\n Member = bqWriteServiceAccount.Email.Apply(email =\u003e $\"serviceAccount:{email}\"),\n });\n\n var editor = new Gcp.Projects.IAMMember(\"editor\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.dataEditor\",\n Member = bqWriteServiceAccount.Email.Apply(email =\u003e $\"serviceAccount:{email}\"),\n });\n\n var test = new Gcp.BigQuery.Dataset(\"test\", new()\n {\n DatasetId = \"example_dataset\",\n });\n\n var testTable = new Gcp.BigQuery.Table(\"test\", new()\n {\n DeletionProtection = false,\n TableId = \"example_table\",\n DatasetId = test.DatasetId,\n Schema = @\"[\n {\n \"\"name\"\": \"\"data\"\",\n \"\"type\"\": \"\"STRING\"\",\n \"\"mode\"\": \"\"NULLABLE\"\",\n \"\"description\"\": \"\"The data\"\"\n }\n]\n\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n BigqueryConfig = new Gcp.PubSub.Inputs.SubscriptionBigqueryConfigArgs\n {\n Table = Output.Tuple(testTable.Project, testTable.DatasetId, testTable.TableId).Apply(values =\u003e\n {\n var project = values.Item1;\n var datasetId = values.Item2;\n var tableId = values.Item3;\n return $\"{project}.{datasetId}.{tableId}\";\n }),\n ServiceAccountEmail = bqWriteServiceAccount.Email,\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n bqWriteServiceAccount,\n viewer,\n editor,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbqWriteServiceAccount, err := serviceaccount.NewAccount(ctx, \"bq_write_service_account\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"example-bqw\"),\n\t\t\tDisplayName: pulumi.String(\"BQ Write Service Account\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tviewer, err := projects.NewIAMMember(ctx, \"viewer\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.metadataViewer\"),\n\t\t\tMember: bqWriteServiceAccount.Email.ApplyT(func(email string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"serviceAccount:%v\", email), 
nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\teditor, err := projects.NewIAMMember(ctx, \"editor\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.dataEditor\"),\n\t\t\tMember: bqWriteServiceAccount.Email.ApplyT(func(email string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"serviceAccount:%v\", email), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttest, err := bigquery.NewDataset(ctx, \"test\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"example_dataset\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestTable, err := bigquery.NewTable(ctx, \"test\", \u0026bigquery.TableArgs{\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTableId: pulumi.String(\"example_table\"),\n\t\t\tDatasetId: test.DatasetId,\n\t\t\tSchema: pulumi.String(`[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tBigqueryConfig: \u0026pubsub.SubscriptionBigqueryConfigArgs{\n\t\t\t\tTable: pulumi.All(testTable.Project, testTable.DatasetId, testTable.TableId).ApplyT(func(_args []interface{}) (string, error) {\n\t\t\t\t\tproject := _args[0].(string)\n\t\t\t\t\tdatasetId := _args[1].(string)\n\t\t\t\t\ttableId := _args[2].(string)\n\t\t\t\t\treturn fmt.Sprintf(\"%v.%v.%v\", project, datasetId, tableId), nil\n\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\tServiceAccountEmail: bqWriteServiceAccount.Email,\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tbqWriteServiceAccount,\n\t\t\tviewer,\n\t\t\teditor,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.Table;\nimport com.pulumi.gcp.bigquery.TableArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionBigqueryConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var bqWriteServiceAccount = new Account(\"bqWriteServiceAccount\", AccountArgs.builder()\n .accountId(\"example-bqw\")\n .displayName(\"BQ Write Service Account\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var viewer = new 
IAMMember(\"viewer\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.metadataViewer\")\n .member(bqWriteServiceAccount.email().applyValue(email -\u003e String.format(\"serviceAccount:%s\", email)))\n .build());\n\n var editor = new IAMMember(\"editor\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.dataEditor\")\n .member(bqWriteServiceAccount.email().applyValue(email -\u003e String.format(\"serviceAccount:%s\", email)))\n .build());\n\n var test = new Dataset(\"test\", DatasetArgs.builder()\n .datasetId(\"example_dataset\")\n .build());\n\n var testTable = new Table(\"testTable\", TableArgs.builder()\n .deletionProtection(false)\n .tableId(\"example_table\")\n .datasetId(test.datasetId())\n .schema(\"\"\"\n[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n \"\"\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .bigqueryConfig(SubscriptionBigqueryConfigArgs.builder()\n .table(Output.tuple(testTable.project(), testTable.datasetId(), testTable.tableId()).applyValue(values -\u003e {\n var project = values.t1;\n var datasetId = values.t2;\n var tableId = values.t3;\n return String.format(\"%s.%s.%s\", project, datasetId, tableId);\n }))\n .serviceAccountEmail(bqWriteServiceAccount.email())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n bqWriteServiceAccount,\n viewer,\n editor)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n bigqueryConfig:\n table: ${testTable.project}.${testTable.datasetId}.${testTable.tableId}\n serviceAccountEmail: ${bqWriteServiceAccount.email}\n options:\n dependson:\n - ${bqWriteServiceAccount}\n - ${viewer}\n - ${editor}\n bqWriteServiceAccount:\n type: gcp:serviceaccount:Account\n name: bq_write_service_account\n properties:\n accountId: example-bqw\n displayName: BQ Write Service Account\n viewer:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.metadataViewer\n member: serviceAccount:${bqWriteServiceAccount.email}\n editor:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.dataEditor\n member: serviceAccount:${bqWriteServiceAccount.email}\n test:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: example_dataset\n testTable:\n type: gcp:bigquery:Table\n name: test\n properties:\n deletionProtection: false\n tableId: example_table\n datasetId: ${test.datasetId}\n schema: |\n [\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n ]\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Cloudstorage\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.storage.Bucket(\"example\", {\n name: \"example-bucket\",\n location: \"US\",\n 
uniformBucketLevelAccess: true,\n});\nconst exampleTopic = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst project = gcp.organizations.getProject({});\nconst admin = new gcp.storage.BucketIAMMember(\"admin\", {\n bucket: example.name,\n role: \"roles/storage.admin\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: exampleTopic.id,\n cloudStorageConfig: {\n bucket: example.name,\n filenamePrefix: \"pre-\",\n filenameSuffix: \"-_91980\",\n filenameDatetimeFormat: \"YYYY-MM-DD/hh_mm_ssZ\",\n maxBytes: 1000,\n maxDuration: \"300s\",\n },\n}, {\n dependsOn: [\n example,\n admin,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.storage.Bucket(\"example\",\n name=\"example-bucket\",\n location=\"US\",\n uniform_bucket_level_access=True)\nexample_topic = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nproject = gcp.organizations.get_project()\nadmin = gcp.storage.BucketIAMMember(\"admin\",\n bucket=example.name,\n role=\"roles/storage.admin\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example_topic.id,\n cloud_storage_config={\n \"bucket\": example.name,\n \"filename_prefix\": \"pre-\",\n \"filename_suffix\": \"-_91980\",\n \"filename_datetime_format\": \"YYYY-MM-DD/hh_mm_ssZ\",\n \"max_bytes\": 1000,\n \"max_duration\": \"300s\",\n },\n opts = pulumi.ResourceOptions(depends_on=[\n example,\n admin,\n ]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.Storage.Bucket(\"example\", new()\n {\n Name = \"example-bucket\",\n Location = \"US\",\n UniformBucketLevelAccess = true,\n });\n\n var exampleTopic = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var admin = new Gcp.Storage.BucketIAMMember(\"admin\", new()\n {\n Bucket = example.Name,\n Role = \"roles/storage.admin\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = exampleTopic.Id,\n CloudStorageConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigArgs\n {\n Bucket = example.Name,\n FilenamePrefix = \"pre-\",\n FilenameSuffix = \"-_91980\",\n FilenameDatetimeFormat = \"YYYY-MM-DD/hh_mm_ssZ\",\n MaxBytes = 1000,\n MaxDuration = \"300s\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n example,\n admin,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := storage.NewBucket(ctx, \"example\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"example-bucket\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tUniformBucketLevelAccess: 
pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texampleTopic, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tadmin, err := storage.NewBucketIAMMember(ctx, \"admin\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: example.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.admin\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: exampleTopic.ID(),\n\t\t\tCloudStorageConfig: \u0026pubsub.SubscriptionCloudStorageConfigArgs{\n\t\t\t\tBucket: example.Name,\n\t\t\t\tFilenamePrefix: pulumi.String(\"pre-\"),\n\t\t\t\tFilenameSuffix: pulumi.String(\"-_91980\"),\n\t\t\t\tFilenameDatetimeFormat: pulumi.String(\"YYYY-MM-DD/hh_mm_ssZ\"),\n\t\t\t\tMaxBytes: pulumi.Int(1000),\n\t\t\t\tMaxDuration: pulumi.String(\"300s\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\texample,\n\t\t\tadmin,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.storage.BucketIAMMember;\nimport com.pulumi.gcp.storage.BucketIAMMemberArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionCloudStorageConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Bucket(\"example\", BucketArgs.builder()\n .name(\"example-bucket\")\n .location(\"US\")\n .uniformBucketLevelAccess(true)\n .build());\n\n var exampleTopic = new Topic(\"exampleTopic\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var admin = new BucketIAMMember(\"admin\", BucketIAMMemberArgs.builder()\n .bucket(example.name())\n .role(\"roles/storage.admin\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(exampleTopic.id())\n .cloudStorageConfig(SubscriptionCloudStorageConfigArgs.builder()\n .bucket(example.name())\n .filenamePrefix(\"pre-\")\n .filenameSuffix(\"-_91980\")\n .filenameDatetimeFormat(\"YYYY-MM-DD/hh_mm_ssZ\")\n .maxBytes(1000)\n .maxDuration(\"300s\")\n .build())\n .build(), CustomResourceOptions.builder()\n 
.dependsOn( \n example,\n admin)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:storage:Bucket\n properties:\n name: example-bucket\n location: US\n uniformBucketLevelAccess: true\n exampleTopic:\n type: gcp:pubsub:Topic\n name: example\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${exampleTopic.id}\n cloudStorageConfig:\n bucket: ${example.name}\n filenamePrefix: pre-\n filenameSuffix: -_91980\n filenameDatetimeFormat: YYYY-MM-DD/hh_mm_ssZ\n maxBytes: 1000\n maxDuration: 300s\n options:\n dependson:\n - ${example}\n - ${admin}\n admin:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${example.name}\n role: roles/storage.admin\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Cloudstorage Avro\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.storage.Bucket(\"example\", {\n name: \"example-bucket\",\n location: \"US\",\n uniformBucketLevelAccess: true,\n});\nconst exampleTopic = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst project = gcp.organizations.getProject({});\nconst admin = new gcp.storage.BucketIAMMember(\"admin\", {\n bucket: example.name,\n role: \"roles/storage.admin\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: exampleTopic.id,\n cloudStorageConfig: {\n bucket: example.name,\n filenamePrefix: \"pre-\",\n filenameSuffix: \"-_37118\",\n filenameDatetimeFormat: \"YYYY-MM-DD/hh_mm_ssZ\",\n maxBytes: 1000,\n maxDuration: \"300s\",\n avroConfig: {\n writeMetadata: true,\n },\n },\n}, {\n dependsOn: [\n example,\n admin,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.storage.Bucket(\"example\",\n name=\"example-bucket\",\n location=\"US\",\n uniform_bucket_level_access=True)\nexample_topic = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nproject = gcp.organizations.get_project()\nadmin = gcp.storage.BucketIAMMember(\"admin\",\n bucket=example.name,\n role=\"roles/storage.admin\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example_topic.id,\n cloud_storage_config={\n \"bucket\": example.name,\n \"filename_prefix\": \"pre-\",\n \"filename_suffix\": \"-_37118\",\n \"filename_datetime_format\": \"YYYY-MM-DD/hh_mm_ssZ\",\n \"max_bytes\": 1000,\n \"max_duration\": \"300s\",\n \"avro_config\": {\n \"write_metadata\": True,\n },\n },\n opts = pulumi.ResourceOptions(depends_on=[\n example,\n admin,\n ]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.Storage.Bucket(\"example\", new()\n {\n Name = \"example-bucket\",\n Location = \"US\",\n UniformBucketLevelAccess = true,\n });\n\n var exampleTopic = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n 
});\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var admin = new Gcp.Storage.BucketIAMMember(\"admin\", new()\n {\n Bucket = example.Name,\n Role = \"roles/storage.admin\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = exampleTopic.Id,\n CloudStorageConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigArgs\n {\n Bucket = example.Name,\n FilenamePrefix = \"pre-\",\n FilenameSuffix = \"-_37118\",\n FilenameDatetimeFormat = \"YYYY-MM-DD/hh_mm_ssZ\",\n MaxBytes = 1000,\n MaxDuration = \"300s\",\n AvroConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigAvroConfigArgs\n {\n WriteMetadata = true,\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n example,\n admin,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := storage.NewBucket(ctx, \"example\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"example-bucket\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tUniformBucketLevelAccess: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texampleTopic, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tadmin, err := storage.NewBucketIAMMember(ctx, \"admin\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: example.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.admin\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: exampleTopic.ID(),\n\t\t\tCloudStorageConfig: \u0026pubsub.SubscriptionCloudStorageConfigArgs{\n\t\t\t\tBucket: example.Name,\n\t\t\t\tFilenamePrefix: pulumi.String(\"pre-\"),\n\t\t\t\tFilenameSuffix: pulumi.String(\"-_37118\"),\n\t\t\t\tFilenameDatetimeFormat: pulumi.String(\"YYYY-MM-DD/hh_mm_ssZ\"),\n\t\t\t\tMaxBytes: pulumi.Int(1000),\n\t\t\t\tMaxDuration: pulumi.String(\"300s\"),\n\t\t\t\tAvroConfig: \u0026pubsub.SubscriptionCloudStorageConfigAvroConfigArgs{\n\t\t\t\t\tWriteMetadata: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\texample,\n\t\t\tadmin,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.storage.BucketIAMMember;\nimport 
com.pulumi.gcp.storage.BucketIAMMemberArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionCloudStorageConfigArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionCloudStorageConfigAvroConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Bucket(\"example\", BucketArgs.builder()\n .name(\"example-bucket\")\n .location(\"US\")\n .uniformBucketLevelAccess(true)\n .build());\n\n var exampleTopic = new Topic(\"exampleTopic\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var admin = new BucketIAMMember(\"admin\", BucketIAMMemberArgs.builder()\n .bucket(example.name())\n .role(\"roles/storage.admin\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(exampleTopic.id())\n .cloudStorageConfig(SubscriptionCloudStorageConfigArgs.builder()\n .bucket(example.name())\n .filenamePrefix(\"pre-\")\n .filenameSuffix(\"-_37118\")\n .filenameDatetimeFormat(\"YYYY-MM-DD/hh_mm_ssZ\")\n .maxBytes(1000)\n .maxDuration(\"300s\")\n .avroConfig(SubscriptionCloudStorageConfigAvroConfigArgs.builder()\n .writeMetadata(true)\n .build())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n example,\n admin)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:storage:Bucket\n properties:\n name: example-bucket\n location: US\n uniformBucketLevelAccess: true\n exampleTopic:\n type: gcp:pubsub:Topic\n name: example\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${exampleTopic.id}\n cloudStorageConfig:\n bucket: ${example.name}\n filenamePrefix: pre-\n filenameSuffix: -_37118\n filenameDatetimeFormat: YYYY-MM-DD/hh_mm_ssZ\n maxBytes: 1000\n maxDuration: 300s\n avroConfig:\n writeMetadata: true\n options:\n dependson:\n - ${example}\n - ${admin}\n admin:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${example.name}\n role: roles/storage.admin\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Cloudstorage Service Account\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.storage.Bucket(\"example\", {\n name: \"example-bucket\",\n location: \"US\",\n uniformBucketLevelAccess: true,\n});\nconst exampleTopic = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst storageWriteServiceAccount = new gcp.serviceaccount.Account(\"storage_write_service_account\", {\n accountId: \"example-stw\",\n displayName: \"Storage Write Service Account\",\n});\nconst admin = new 
gcp.storage.BucketIAMMember(\"admin\", {\n bucket: example.name,\n role: \"roles/storage.admin\",\n member: pulumi.interpolate`serviceAccount:${storageWriteServiceAccount.email}`,\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: exampleTopic.id,\n cloudStorageConfig: {\n bucket: example.name,\n filenamePrefix: \"pre-\",\n filenameSuffix: \"-_80332\",\n filenameDatetimeFormat: \"YYYY-MM-DD/hh_mm_ssZ\",\n maxBytes: 1000,\n maxDuration: \"300s\",\n serviceAccountEmail: storageWriteServiceAccount.email,\n },\n}, {\n dependsOn: [\n storageWriteServiceAccount,\n example,\n admin,\n ],\n});\nconst project = gcp.organizations.getProject({});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.storage.Bucket(\"example\",\n name=\"example-bucket\",\n location=\"US\",\n uniform_bucket_level_access=True)\nexample_topic = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nstorage_write_service_account = gcp.serviceaccount.Account(\"storage_write_service_account\",\n account_id=\"example-stw\",\n display_name=\"Storage Write Service Account\")\nadmin = gcp.storage.BucketIAMMember(\"admin\",\n bucket=example.name,\n role=\"roles/storage.admin\",\n member=storage_write_service_account.email.apply(lambda email: f\"serviceAccount:{email}\"))\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example_topic.id,\n cloud_storage_config={\n \"bucket\": example.name,\n \"filename_prefix\": \"pre-\",\n \"filename_suffix\": \"-_80332\",\n \"filename_datetime_format\": \"YYYY-MM-DD/hh_mm_ssZ\",\n \"max_bytes\": 1000,\n \"max_duration\": \"300s\",\n \"service_account_email\": storage_write_service_account.email,\n },\n opts = pulumi.ResourceOptions(depends_on=[\n storage_write_service_account,\n example,\n admin,\n ]))\nproject = gcp.organizations.get_project()\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.Storage.Bucket(\"example\", new()\n {\n Name = \"example-bucket\",\n Location = \"US\",\n UniformBucketLevelAccess = true,\n });\n\n var exampleTopic = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var storageWriteServiceAccount = new Gcp.ServiceAccount.Account(\"storage_write_service_account\", new()\n {\n AccountId = \"example-stw\",\n DisplayName = \"Storage Write Service Account\",\n });\n\n var admin = new Gcp.Storage.BucketIAMMember(\"admin\", new()\n {\n Bucket = example.Name,\n Role = \"roles/storage.admin\",\n Member = storageWriteServiceAccount.Email.Apply(email =\u003e $\"serviceAccount:{email}\"),\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = exampleTopic.Id,\n CloudStorageConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigArgs\n {\n Bucket = example.Name,\n FilenamePrefix = \"pre-\",\n FilenameSuffix = \"-_80332\",\n FilenameDatetimeFormat = \"YYYY-MM-DD/hh_mm_ssZ\",\n MaxBytes = 1000,\n MaxDuration = \"300s\",\n ServiceAccountEmail = storageWriteServiceAccount.Email,\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n storageWriteServiceAccount,\n example,\n admin,\n },\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := storage.NewBucket(ctx, \"example\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"example-bucket\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tUniformBucketLevelAccess: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texampleTopic, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstorageWriteServiceAccount, err := serviceaccount.NewAccount(ctx, \"storage_write_service_account\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"example-stw\"),\n\t\t\tDisplayName: pulumi.String(\"Storage Write Service Account\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tadmin, err := storage.NewBucketIAMMember(ctx, \"admin\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: example.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.admin\"),\n\t\t\tMember: storageWriteServiceAccount.Email.ApplyT(func(email string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"serviceAccount:%v\", email), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: exampleTopic.ID(),\n\t\t\tCloudStorageConfig: \u0026pubsub.SubscriptionCloudStorageConfigArgs{\n\t\t\t\tBucket: example.Name,\n\t\t\t\tFilenamePrefix: pulumi.String(\"pre-\"),\n\t\t\t\tFilenameSuffix: pulumi.String(\"-_80332\"),\n\t\t\t\tFilenameDatetimeFormat: pulumi.String(\"YYYY-MM-DD/hh_mm_ssZ\"),\n\t\t\t\tMaxBytes: pulumi.Int(1000),\n\t\t\t\tMaxDuration: pulumi.String(\"300s\"),\n\t\t\t\tServiceAccountEmail: storageWriteServiceAccount.Email,\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tstorageWriteServiceAccount,\n\t\t\texample,\n\t\t\tadmin,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.storage.BucketIAMMember;\nimport com.pulumi.gcp.storage.BucketIAMMemberArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionCloudStorageConfigArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n 
Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Bucket(\"example\", BucketArgs.builder()\n .name(\"example-bucket\")\n .location(\"US\")\n .uniformBucketLevelAccess(true)\n .build());\n\n var exampleTopic = new Topic(\"exampleTopic\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var storageWriteServiceAccount = new Account(\"storageWriteServiceAccount\", AccountArgs.builder()\n .accountId(\"example-stw\")\n .displayName(\"Storage Write Service Account\")\n .build());\n\n var admin = new BucketIAMMember(\"admin\", BucketIAMMemberArgs.builder()\n .bucket(example.name())\n .role(\"roles/storage.admin\")\n .member(storageWriteServiceAccount.email().applyValue(email -\u003e String.format(\"serviceAccount:%s\", email)))\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(exampleTopic.id())\n .cloudStorageConfig(SubscriptionCloudStorageConfigArgs.builder()\n .bucket(example.name())\n .filenamePrefix(\"pre-\")\n .filenameSuffix(\"-_80332\")\n .filenameDatetimeFormat(\"YYYY-MM-DD/hh_mm_ssZ\")\n .maxBytes(1000)\n .maxDuration(\"300s\")\n .serviceAccountEmail(storageWriteServiceAccount.email())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n storageWriteServiceAccount,\n example,\n admin)\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:storage:Bucket\n properties:\n name: example-bucket\n location: US\n uniformBucketLevelAccess: true\n exampleTopic:\n type: gcp:pubsub:Topic\n name: example\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${exampleTopic.id}\n cloudStorageConfig:\n bucket: ${example.name}\n filenamePrefix: pre-\n filenameSuffix: -_80332\n filenameDatetimeFormat: YYYY-MM-DD/hh_mm_ssZ\n maxBytes: 1000\n maxDuration: 300s\n serviceAccountEmail: ${storageWriteServiceAccount.email}\n options:\n dependson:\n - ${storageWriteServiceAccount}\n - ${example}\n - ${admin}\n storageWriteServiceAccount:\n type: gcp:serviceaccount:Account\n name: storage_write_service_account\n properties:\n accountId: example-stw\n displayName: Storage Write Service Account\n admin:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${example.name}\n role: roles/storage.admin\n member: serviceAccount:${storageWriteServiceAccount.email}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nSubscription can be imported using any of these accepted formats:\n\n* `projects/{{project}}/subscriptions/{{name}}`\n\n* `{{project}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, Subscription can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:pubsub/subscription:Subscription default projects/{{project}}/subscriptions/{{name}}\n```\n\n```sh\n$ pulumi import gcp:pubsub/subscription:Subscription default {{project}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:pubsub/subscription:Subscription default {{name}}\n```\n\n", + "description": "A named resource representing the stream of messages from a single,\nspecific topic, to be delivered to the subscribing application.\n\n\nTo get more information about Subscription, see:\n\n* [API documentation](https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions)\n* How-to Guides\n * [Managing Subscriptions](https://cloud.google.com/pubsub/docs/admin#managing_subscriptions)\n\n\u003e **Note:** You can retrieve the email of the Google Managed Pub/Sub Service Account used for forwarding\nby using the `gcp.projects.ServiceIdentity` resource.\n\n## Example Usage\n\n### Pubsub Subscription Push\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n ackDeadlineSeconds: 20,\n labels: {\n foo: \"bar\",\n },\n pushConfig: {\n pushEndpoint: \"https://example.com/push\",\n attributes: {\n \"x-goog-version\": \"v1\",\n },\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n ack_deadline_seconds=20,\n labels={\n \"foo\": \"bar\",\n },\n push_config={\n \"push_endpoint\": \"https://example.com/push\",\n \"attributes\": {\n \"x_goog_version\": \"v1\",\n },\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n AckDeadlineSeconds = 20,\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n PushConfig = new Gcp.PubSub.Inputs.SubscriptionPushConfigArgs\n {\n PushEndpoint = \"https://example.com/push\",\n Attributes = \n {\n { \"x-goog-version\", \"v1\" },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tAckDeadlineSeconds: pulumi.Int(20),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tPushConfig: \u0026pubsub.SubscriptionPushConfigArgs{\n\t\t\t\tPushEndpoint: pulumi.String(\"https://example.com/push\"),\n\t\t\t\tAttributes: pulumi.StringMap{\n\t\t\t\t\t\"x-goog-version\": pulumi.String(\"v1\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionPushConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .ackDeadlineSeconds(20)\n .labels(Map.of(\"foo\", \"bar\"))\n .pushConfig(SubscriptionPushConfigArgs.builder()\n .pushEndpoint(\"https://example.com/push\")\n .attributes(Map.of(\"x-goog-version\", \"v1\"))\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n ackDeadlineSeconds: 20\n labels:\n foo: bar\n pushConfig:\n pushEndpoint: https://example.com/push\n attributes:\n x-goog-version: v1\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Pull\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n labels: {\n foo: \"bar\",\n },\n messageRetentionDuration: \"1200s\",\n retainAckedMessages: true,\n ackDeadlineSeconds: 20,\n expirationPolicy: {\n ttl: \"300000.5s\",\n },\n retryPolicy: {\n minimumBackoff: \"10s\",\n },\n enableMessageOrdering: false,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n labels={\n \"foo\": \"bar\",\n },\n message_retention_duration=\"1200s\",\n retain_acked_messages=True,\n ack_deadline_seconds=20,\n expiration_policy={\n \"ttl\": \"300000.5s\",\n },\n retry_policy={\n \"minimum_backoff\": \"10s\",\n },\n enable_message_ordering=False)\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n Labels = \n {\n { \"foo\", \"bar\" },\n },\n MessageRetentionDuration = \"1200s\",\n RetainAckedMessages = true,\n AckDeadlineSeconds = 20,\n ExpirationPolicy = new Gcp.PubSub.Inputs.SubscriptionExpirationPolicyArgs\n {\n Ttl = \"300000.5s\",\n },\n RetryPolicy = new Gcp.PubSub.Inputs.SubscriptionRetryPolicyArgs\n {\n MinimumBackoff = \"10s\",\n },\n 
EnableMessageOrdering = false,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"foo\": pulumi.String(\"bar\"),\n\t\t\t},\n\t\t\tMessageRetentionDuration: pulumi.String(\"1200s\"),\n\t\t\tRetainAckedMessages: pulumi.Bool(true),\n\t\t\tAckDeadlineSeconds: pulumi.Int(20),\n\t\t\tExpirationPolicy: \u0026pubsub.SubscriptionExpirationPolicyArgs{\n\t\t\t\tTtl: pulumi.String(\"300000.5s\"),\n\t\t\t},\n\t\t\tRetryPolicy: \u0026pubsub.SubscriptionRetryPolicyArgs{\n\t\t\t\tMinimumBackoff: pulumi.String(\"10s\"),\n\t\t\t},\n\t\t\tEnableMessageOrdering: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionExpirationPolicyArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionRetryPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .labels(Map.of(\"foo\", \"bar\"))\n .messageRetentionDuration(\"1200s\")\n .retainAckedMessages(true)\n .ackDeadlineSeconds(20)\n .expirationPolicy(SubscriptionExpirationPolicyArgs.builder()\n .ttl(\"300000.5s\")\n .build())\n .retryPolicy(SubscriptionRetryPolicyArgs.builder()\n .minimumBackoff(\"10s\")\n .build())\n .enableMessageOrdering(false)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n labels:\n foo: bar\n messageRetentionDuration: 1200s\n retainAckedMessages: true\n ackDeadlineSeconds: 20\n expirationPolicy:\n ttl: 300000.5s\n retryPolicy:\n minimumBackoff: 10s\n enableMessageOrdering: false\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Dead Letter\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst exampleDeadLetter = new gcp.pubsub.Topic(\"example_dead_letter\", {name: \"example-topic-dead-letter\"});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n 
topic: example.id,\n deadLetterPolicy: {\n deadLetterTopic: exampleDeadLetter.id,\n maxDeliveryAttempts: 10,\n },\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nexample_dead_letter = gcp.pubsub.Topic(\"example_dead_letter\", name=\"example-topic-dead-letter\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n dead_letter_policy={\n \"dead_letter_topic\": example_dead_letter.id,\n \"max_delivery_attempts\": 10,\n })\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var exampleDeadLetter = new Gcp.PubSub.Topic(\"example_dead_letter\", new()\n {\n Name = \"example-topic-dead-letter\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n DeadLetterPolicy = new Gcp.PubSub.Inputs.SubscriptionDeadLetterPolicyArgs\n {\n DeadLetterTopic = exampleDeadLetter.Id,\n MaxDeliveryAttempts = 10,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texampleDeadLetter, err := pubsub.NewTopic(ctx, \"example_dead_letter\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic-dead-letter\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tDeadLetterPolicy: \u0026pubsub.SubscriptionDeadLetterPolicyArgs{\n\t\t\t\tDeadLetterTopic: exampleDeadLetter.ID(),\n\t\t\t\tMaxDeliveryAttempts: pulumi.Int(10),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionDeadLetterPolicyArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var exampleDeadLetter = new Topic(\"exampleDeadLetter\", TopicArgs.builder()\n .name(\"example-topic-dead-letter\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .deadLetterPolicy(SubscriptionDeadLetterPolicyArgs.builder()\n .deadLetterTopic(exampleDeadLetter.id())\n .maxDeliveryAttempts(10)\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n 
type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleDeadLetter:\n type: gcp:pubsub:Topic\n name: example_dead_letter\n properties:\n name: example-topic-dead-letter\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n deadLetterPolicy:\n deadLetterTopic: ${exampleDeadLetter.id}\n maxDeliveryAttempts: 10\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Bq\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst project = gcp.organizations.getProject({});\nconst viewer = new gcp.projects.IAMMember(\"viewer\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.metadataViewer\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst editor = new gcp.projects.IAMMember(\"editor\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.dataEditor\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst test = new gcp.bigquery.Dataset(\"test\", {datasetId: \"example_dataset\"});\nconst testTable = new gcp.bigquery.Table(\"test\", {\n deletionProtection: false,\n tableId: \"example_table\",\n datasetId: test.datasetId,\n schema: `[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`,\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n bigqueryConfig: {\n table: pulumi.interpolate`${testTable.project}.${testTable.datasetId}.${testTable.tableId}`,\n },\n}, {\n dependsOn: [\n viewer,\n editor,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nproject = gcp.organizations.get_project()\nviewer = gcp.projects.IAMMember(\"viewer\",\n project=project.project_id,\n role=\"roles/bigquery.metadataViewer\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\neditor = gcp.projects.IAMMember(\"editor\",\n project=project.project_id,\n role=\"roles/bigquery.dataEditor\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\ntest = gcp.bigquery.Dataset(\"test\", dataset_id=\"example_dataset\")\ntest_table = gcp.bigquery.Table(\"test\",\n deletion_protection=False,\n table_id=\"example_table\",\n dataset_id=test.dataset_id,\n schema=\"\"\"[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n\"\"\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n bigquery_config={\n \"table\": pulumi.Output.all(\n project=test_table.project,\n dataset_id=test_table.dataset_id,\n table_id=test_table.table_id\n).apply(lambda resolved_outputs: f\"{resolved_outputs['project']}.{resolved_outputs['dataset_id']}.{resolved_outputs['table_id']}\")\n,\n },\n opts = pulumi.ResourceOptions(depends_on=[\n viewer,\n editor,\n ]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() 
=\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var viewer = new Gcp.Projects.IAMMember(\"viewer\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.metadataViewer\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var editor = new Gcp.Projects.IAMMember(\"editor\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.dataEditor\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var test = new Gcp.BigQuery.Dataset(\"test\", new()\n {\n DatasetId = \"example_dataset\",\n });\n\n var testTable = new Gcp.BigQuery.Table(\"test\", new()\n {\n DeletionProtection = false,\n TableId = \"example_table\",\n DatasetId = test.DatasetId,\n Schema = @\"[\n {\n \"\"name\"\": \"\"data\"\",\n \"\"type\"\": \"\"STRING\"\",\n \"\"mode\"\": \"\"NULLABLE\"\",\n \"\"description\"\": \"\"The data\"\"\n }\n]\n\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n BigqueryConfig = new Gcp.PubSub.Inputs.SubscriptionBigqueryConfigArgs\n {\n Table = Output.Tuple(testTable.Project, testTable.DatasetId, testTable.TableId).Apply(values =\u003e\n {\n var project = values.Item1;\n var datasetId = values.Item2;\n var tableId = values.Item3;\n return $\"{project}.{datasetId}.{tableId}\";\n }),\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n viewer,\n editor,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tviewer, err := projects.NewIAMMember(ctx, \"viewer\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.metadataViewer\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\teditor, err := projects.NewIAMMember(ctx, \"editor\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.dataEditor\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttest, err := bigquery.NewDataset(ctx, \"test\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"example_dataset\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestTable, err := bigquery.NewTable(ctx, \"test\", 
\u0026bigquery.TableArgs{\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTableId: pulumi.String(\"example_table\"),\n\t\t\tDatasetId: test.DatasetId,\n\t\t\tSchema: pulumi.String(`[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tBigqueryConfig: \u0026pubsub.SubscriptionBigqueryConfigArgs{\n\t\t\t\tTable: pulumi.All(testTable.Project, testTable.DatasetId, testTable.TableId).ApplyT(func(_args []interface{}) (string, error) {\n\t\t\t\t\tproject := _args[0].(string)\n\t\t\t\t\tdatasetId := _args[1].(string)\n\t\t\t\t\ttableId := _args[2].(string)\n\t\t\t\t\treturn fmt.Sprintf(\"%v.%v.%v\", project, datasetId, tableId), nil\n\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tviewer,\n\t\t\teditor,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.Table;\nimport com.pulumi.gcp.bigquery.TableArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionBigqueryConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var viewer = new IAMMember(\"viewer\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.metadataViewer\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var editor = new IAMMember(\"editor\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.dataEditor\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var test = new Dataset(\"test\", DatasetArgs.builder()\n .datasetId(\"example_dataset\")\n .build());\n\n var testTable = new Table(\"testTable\", TableArgs.builder()\n .deletionProtection(false)\n .tableId(\"example_table\")\n .datasetId(test.datasetId())\n .schema(\"\"\"\n[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n \"\"\")\n 
.build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .bigqueryConfig(SubscriptionBigqueryConfigArgs.builder()\n .table(Output.tuple(testTable.project(), testTable.datasetId(), testTable.tableId()).applyValue(values -\u003e {\n var project = values.t1;\n var datasetId = values.t2;\n var tableId = values.t3;\n return String.format(\"%s.%s.%s\", project.applyValue(getProjectResult -\u003e getProjectResult),datasetId,tableId);\n }))\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n viewer,\n editor)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n bigqueryConfig:\n table: ${testTable.project}.${testTable.datasetId}.${testTable.tableId}\n options:\n dependson:\n - ${viewer}\n - ${editor}\n viewer:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.metadataViewer\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\n editor:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.dataEditor\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\n test:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: example_dataset\n testTable:\n type: gcp:bigquery:Table\n name: test\n properties:\n deletionProtection: false\n tableId: example_table\n datasetId: ${test.datasetId}\n schema: |\n [\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n ]\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Bq Table Schema\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst project = gcp.organizations.getProject({});\nconst viewer = new gcp.projects.IAMMember(\"viewer\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.metadataViewer\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst editor = new gcp.projects.IAMMember(\"editor\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.dataEditor\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst test = new gcp.bigquery.Dataset(\"test\", {datasetId: \"example_dataset\"});\nconst testTable = new gcp.bigquery.Table(\"test\", {\n deletionProtection: false,\n tableId: \"example_table\",\n datasetId: test.datasetId,\n schema: `[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`,\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n bigqueryConfig: {\n table: pulumi.interpolate`${testTable.project}.${testTable.datasetId}.${testTable.tableId}`,\n useTableSchema: true,\n },\n}, {\n dependsOn: [\n viewer,\n editor,\n 
],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nproject = gcp.organizations.get_project()\nviewer = gcp.projects.IAMMember(\"viewer\",\n project=project.project_id,\n role=\"roles/bigquery.metadataViewer\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\neditor = gcp.projects.IAMMember(\"editor\",\n project=project.project_id,\n role=\"roles/bigquery.dataEditor\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\ntest = gcp.bigquery.Dataset(\"test\", dataset_id=\"example_dataset\")\ntest_table = gcp.bigquery.Table(\"test\",\n deletion_protection=False,\n table_id=\"example_table\",\n dataset_id=test.dataset_id,\n schema=\"\"\"[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n\"\"\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n bigquery_config={\n \"table\": pulumi.Output.all(\n project=test_table.project,\n dataset_id=test_table.dataset_id,\n table_id=test_table.table_id\n).apply(lambda resolved_outputs: f\"{resolved_outputs['project']}.{resolved_outputs['dataset_id']}.{resolved_outputs['table_id']}\")\n,\n \"use_table_schema\": True,\n },\n opts = pulumi.ResourceOptions(depends_on=[\n viewer,\n editor,\n ]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var viewer = new Gcp.Projects.IAMMember(\"viewer\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.metadataViewer\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var editor = new Gcp.Projects.IAMMember(\"editor\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.dataEditor\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var test = new Gcp.BigQuery.Dataset(\"test\", new()\n {\n DatasetId = \"example_dataset\",\n });\n\n var testTable = new Gcp.BigQuery.Table(\"test\", new()\n {\n DeletionProtection = false,\n TableId = \"example_table\",\n DatasetId = test.DatasetId,\n Schema = @\"[\n {\n \"\"name\"\": \"\"data\"\",\n \"\"type\"\": \"\"STRING\"\",\n \"\"mode\"\": \"\"NULLABLE\"\",\n \"\"description\"\": \"\"The data\"\"\n }\n]\n\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n BigqueryConfig = new Gcp.PubSub.Inputs.SubscriptionBigqueryConfigArgs\n {\n Table = Output.Tuple(testTable.Project, testTable.DatasetId, testTable.TableId).Apply(values =\u003e\n {\n var project = values.Item1;\n var datasetId = values.Item2;\n var tableId = values.Item3;\n return $\"{project}.{datasetId}.{tableId}\";\n }),\n UseTableSchema = true,\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n viewer,\n editor,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tviewer, err := projects.NewIAMMember(ctx, \"viewer\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.metadataViewer\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\teditor, err := projects.NewIAMMember(ctx, \"editor\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.dataEditor\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttest, err := bigquery.NewDataset(ctx, \"test\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"example_dataset\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestTable, err := bigquery.NewTable(ctx, \"test\", \u0026bigquery.TableArgs{\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTableId: pulumi.String(\"example_table\"),\n\t\t\tDatasetId: test.DatasetId,\n\t\t\tSchema: pulumi.String(`[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tBigqueryConfig: \u0026pubsub.SubscriptionBigqueryConfigArgs{\n\t\t\t\tTable: pulumi.All(testTable.Project, testTable.DatasetId, testTable.TableId).ApplyT(func(_args []interface{}) (string, error) {\n\t\t\t\t\tproject := _args[0].(string)\n\t\t\t\t\tdatasetId := _args[1].(string)\n\t\t\t\t\ttableId := _args[2].(string)\n\t\t\t\t\treturn fmt.Sprintf(\"%v.%v.%v\", project, datasetId, tableId), nil\n\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\tUseTableSchema: pulumi.Bool(true),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tviewer,\n\t\t\teditor,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.Table;\nimport com.pulumi.gcp.bigquery.TableArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport 
com.pulumi.gcp.pubsub.inputs.SubscriptionBigqueryConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var viewer = new IAMMember(\"viewer\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.metadataViewer\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var editor = new IAMMember(\"editor\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.dataEditor\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var test = new Dataset(\"test\", DatasetArgs.builder()\n .datasetId(\"example_dataset\")\n .build());\n\n var testTable = new Table(\"testTable\", TableArgs.builder()\n .deletionProtection(false)\n .tableId(\"example_table\")\n .datasetId(test.datasetId())\n .schema(\"\"\"\n[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n \"\"\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .bigqueryConfig(SubscriptionBigqueryConfigArgs.builder()\n .table(Output.tuple(testTable.project(), testTable.datasetId(), testTable.tableId()).applyValue(values -\u003e {\n var project = values.t1;\n var datasetId = values.t2;\n var tableId = values.t3;\n return String.format(\"%s.%s.%s\", project.applyValue(getProjectResult -\u003e getProjectResult),datasetId,tableId);\n }))\n .useTableSchema(true)\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n viewer,\n editor)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n bigqueryConfig:\n table: ${testTable.project}.${testTable.datasetId}.${testTable.tableId}\n useTableSchema: true\n options:\n dependson:\n - ${viewer}\n - ${editor}\n viewer:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.metadataViewer\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\n editor:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.dataEditor\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\n test:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: example_dataset\n testTable:\n type: gcp:bigquery:Table\n name: test\n properties:\n deletionProtection: false\n tableId: example_table\n datasetId: ${test.datasetId}\n schema: |\n [\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": 
\"NULLABLE\",\n \"description\": \"The data\"\n }\n ]\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Bq Service Account\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst bqWriteServiceAccount = new gcp.serviceaccount.Account(\"bq_write_service_account\", {\n accountId: \"example-bqw\",\n displayName: \"BQ Write Service Account\",\n});\nconst project = gcp.organizations.getProject({});\nconst viewer = new gcp.projects.IAMMember(\"viewer\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.metadataViewer\",\n member: pulumi.interpolate`serviceAccount:${bqWriteServiceAccount.email}`,\n});\nconst editor = new gcp.projects.IAMMember(\"editor\", {\n project: project.then(project =\u003e project.projectId),\n role: \"roles/bigquery.dataEditor\",\n member: pulumi.interpolate`serviceAccount:${bqWriteServiceAccount.email}`,\n});\nconst test = new gcp.bigquery.Dataset(\"test\", {datasetId: \"example_dataset\"});\nconst testTable = new gcp.bigquery.Table(\"test\", {\n deletionProtection: false,\n tableId: \"example_table\",\n datasetId: test.datasetId,\n schema: `[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`,\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: example.id,\n bigqueryConfig: {\n table: pulumi.interpolate`${testTable.project}.${testTable.datasetId}.${testTable.tableId}`,\n serviceAccountEmail: bqWriteServiceAccount.email,\n },\n}, {\n dependsOn: [\n bqWriteServiceAccount,\n viewer,\n editor,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nbq_write_service_account = gcp.serviceaccount.Account(\"bq_write_service_account\",\n account_id=\"example-bqw\",\n display_name=\"BQ Write Service Account\")\nproject = gcp.organizations.get_project()\nviewer = gcp.projects.IAMMember(\"viewer\",\n project=project.project_id,\n role=\"roles/bigquery.metadataViewer\",\n member=bq_write_service_account.email.apply(lambda email: f\"serviceAccount:{email}\"))\neditor = gcp.projects.IAMMember(\"editor\",\n project=project.project_id,\n role=\"roles/bigquery.dataEditor\",\n member=bq_write_service_account.email.apply(lambda email: f\"serviceAccount:{email}\"))\ntest = gcp.bigquery.Dataset(\"test\", dataset_id=\"example_dataset\")\ntest_table = gcp.bigquery.Table(\"test\",\n deletion_protection=False,\n table_id=\"example_table\",\n dataset_id=test.dataset_id,\n schema=\"\"\"[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n\"\"\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example.id,\n bigquery_config={\n \"table\": pulumi.Output.all(\n project=test_table.project,\n dataset_id=test_table.dataset_id,\n table_id=test_table.table_id\n).apply(lambda resolved_outputs: f\"{resolved_outputs['project']}.{resolved_outputs['dataset_id']}.{resolved_outputs['table_id']}\")\n,\n \"service_account_email\": bq_write_service_account.email,\n },\n opts = pulumi.ResourceOptions(depends_on=[\n bq_write_service_account,\n viewer,\n editor,\n 
]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var bqWriteServiceAccount = new Gcp.ServiceAccount.Account(\"bq_write_service_account\", new()\n {\n AccountId = \"example-bqw\",\n DisplayName = \"BQ Write Service Account\",\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var viewer = new Gcp.Projects.IAMMember(\"viewer\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.metadataViewer\",\n Member = bqWriteServiceAccount.Email.Apply(email =\u003e $\"serviceAccount:{email}\"),\n });\n\n var editor = new Gcp.Projects.IAMMember(\"editor\", new()\n {\n Project = project.Apply(getProjectResult =\u003e getProjectResult.ProjectId),\n Role = \"roles/bigquery.dataEditor\",\n Member = bqWriteServiceAccount.Email.Apply(email =\u003e $\"serviceAccount:{email}\"),\n });\n\n var test = new Gcp.BigQuery.Dataset(\"test\", new()\n {\n DatasetId = \"example_dataset\",\n });\n\n var testTable = new Gcp.BigQuery.Table(\"test\", new()\n {\n DeletionProtection = false,\n TableId = \"example_table\",\n DatasetId = test.DatasetId,\n Schema = @\"[\n {\n \"\"name\"\": \"\"data\"\",\n \"\"type\"\": \"\"STRING\"\",\n \"\"mode\"\": \"\"NULLABLE\"\",\n \"\"description\"\": \"\"The data\"\"\n }\n]\n\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = example.Id,\n BigqueryConfig = new Gcp.PubSub.Inputs.SubscriptionBigqueryConfigArgs\n {\n Table = Output.Tuple(testTable.Project, testTable.DatasetId, testTable.TableId).Apply(values =\u003e\n {\n var project = values.Item1;\n var datasetId = values.Item2;\n var tableId = values.Item3;\n return $\"{project}.{datasetId}.{tableId}\";\n }),\n ServiceAccountEmail = bqWriteServiceAccount.Email,\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n bqWriteServiceAccount,\n viewer,\n editor,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbqWriteServiceAccount, err := serviceaccount.NewAccount(ctx, \"bq_write_service_account\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"example-bqw\"),\n\t\t\tDisplayName: pulumi.String(\"BQ Write Service Account\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tviewer, err := projects.NewIAMMember(ctx, \"viewer\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.metadataViewer\"),\n\t\t\tMember: bqWriteServiceAccount.Email.ApplyT(func(email string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"serviceAccount:%v\", email), 
nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\teditor, err := projects.NewIAMMember(ctx, \"editor\", \u0026projects.IAMMemberArgs{\n\t\t\tProject: pulumi.String(project.ProjectId),\n\t\t\tRole: pulumi.String(\"roles/bigquery.dataEditor\"),\n\t\t\tMember: bqWriteServiceAccount.Email.ApplyT(func(email string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"serviceAccount:%v\", email), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttest, err := bigquery.NewDataset(ctx, \"test\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"example_dataset\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttestTable, err := bigquery.NewTable(ctx, \"test\", \u0026bigquery.TableArgs{\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t\tTableId: pulumi.String(\"example_table\"),\n\t\t\tDatasetId: test.DatasetId,\n\t\t\tSchema: pulumi.String(`[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n`),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: example.ID(),\n\t\t\tBigqueryConfig: \u0026pubsub.SubscriptionBigqueryConfigArgs{\n\t\t\t\tTable: pulumi.All(testTable.Project, testTable.DatasetId, testTable.TableId).ApplyT(func(_args []interface{}) (string, error) {\n\t\t\t\t\tproject := _args[0].(string)\n\t\t\t\t\tdatasetId := _args[1].(string)\n\t\t\t\t\ttableId := _args[2].(string)\n\t\t\t\t\treturn fmt.Sprintf(\"%v.%v.%v\", project, datasetId, tableId), nil\n\t\t\t\t}).(pulumi.StringOutput),\n\t\t\t\tServiceAccountEmail: bqWriteServiceAccount.Email,\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tbqWriteServiceAccount,\n\t\t\tviewer,\n\t\t\teditor,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.projects.IAMMember;\nimport com.pulumi.gcp.projects.IAMMemberArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.bigquery.Table;\nimport com.pulumi.gcp.bigquery.TableArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionBigqueryConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Topic(\"example\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var bqWriteServiceAccount = new Account(\"bqWriteServiceAccount\", AccountArgs.builder()\n .accountId(\"example-bqw\")\n .displayName(\"BQ Write Service Account\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var viewer = new 
IAMMember(\"viewer\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.metadataViewer\")\n .member(bqWriteServiceAccount.email().applyValue(email -\u003e String.format(\"serviceAccount:%s\", email)))\n .build());\n\n var editor = new IAMMember(\"editor\", IAMMemberArgs.builder()\n .project(project.applyValue(getProjectResult -\u003e getProjectResult.projectId()))\n .role(\"roles/bigquery.dataEditor\")\n .member(bqWriteServiceAccount.email().applyValue(email -\u003e String.format(\"serviceAccount:%s\", email)))\n .build());\n\n var test = new Dataset(\"test\", DatasetArgs.builder()\n .datasetId(\"example_dataset\")\n .build());\n\n var testTable = new Table(\"testTable\", TableArgs.builder()\n .deletionProtection(false)\n .tableId(\"example_table\")\n .datasetId(test.datasetId())\n .schema(\"\"\"\n[\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n]\n \"\"\")\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(example.id())\n .bigqueryConfig(SubscriptionBigqueryConfigArgs.builder()\n .table(Output.tuple(testTable.project(), testTable.datasetId(), testTable.tableId()).applyValue(values -\u003e {\n var project = values.t1;\n var datasetId = values.t2;\n var tableId = values.t3;\n return String.format(\"%s.%s.%s\", project.applyValue(getProjectResult -\u003e getProjectResult),datasetId,tableId);\n }))\n .serviceAccountEmail(bqWriteServiceAccount.email())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n bqWriteServiceAccount,\n viewer,\n editor)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:pubsub:Topic\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${example.id}\n bigqueryConfig:\n table: ${testTable.project}.${testTable.datasetId}.${testTable.tableId}\n serviceAccountEmail: ${bqWriteServiceAccount.email}\n options:\n dependson:\n - ${bqWriteServiceAccount}\n - ${viewer}\n - ${editor}\n bqWriteServiceAccount:\n type: gcp:serviceaccount:Account\n name: bq_write_service_account\n properties:\n accountId: example-bqw\n displayName: BQ Write Service Account\n viewer:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.metadataViewer\n member: serviceAccount:${bqWriteServiceAccount.email}\n editor:\n type: gcp:projects:IAMMember\n properties:\n project: ${project.projectId}\n role: roles/bigquery.dataEditor\n member: serviceAccount:${bqWriteServiceAccount.email}\n test:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: example_dataset\n testTable:\n type: gcp:bigquery:Table\n name: test\n properties:\n deletionProtection: false\n tableId: example_table\n datasetId: ${test.datasetId}\n schema: |\n [\n {\n \"name\": \"data\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": \"The data\"\n }\n ]\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Cloudstorage\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.storage.Bucket(\"example\", {\n name: \"example-bucket\",\n location: \"US\",\n 
uniformBucketLevelAccess: true,\n});\nconst exampleTopic = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst project = gcp.organizations.getProject({});\nconst admin = new gcp.storage.BucketIAMMember(\"admin\", {\n bucket: example.name,\n role: \"roles/storage.admin\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: exampleTopic.id,\n cloudStorageConfig: {\n bucket: example.name,\n filenamePrefix: \"pre-\",\n filenameSuffix: \"-_91980\",\n filenameDatetimeFormat: \"YYYY-MM-DD/hh_mm_ssZ\",\n maxBytes: 1000,\n maxDuration: \"300s\",\n maxMessages: 1000,\n },\n}, {\n dependsOn: [\n example,\n admin,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.storage.Bucket(\"example\",\n name=\"example-bucket\",\n location=\"US\",\n uniform_bucket_level_access=True)\nexample_topic = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nproject = gcp.organizations.get_project()\nadmin = gcp.storage.BucketIAMMember(\"admin\",\n bucket=example.name,\n role=\"roles/storage.admin\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example_topic.id,\n cloud_storage_config={\n \"bucket\": example.name,\n \"filename_prefix\": \"pre-\",\n \"filename_suffix\": \"-_91980\",\n \"filename_datetime_format\": \"YYYY-MM-DD/hh_mm_ssZ\",\n \"max_bytes\": 1000,\n \"max_duration\": \"300s\",\n \"max_messages\": 1000,\n },\n opts = pulumi.ResourceOptions(depends_on=[\n example,\n admin,\n ]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.Storage.Bucket(\"example\", new()\n {\n Name = \"example-bucket\",\n Location = \"US\",\n UniformBucketLevelAccess = true,\n });\n\n var exampleTopic = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var admin = new Gcp.Storage.BucketIAMMember(\"admin\", new()\n {\n Bucket = example.Name,\n Role = \"roles/storage.admin\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = exampleTopic.Id,\n CloudStorageConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigArgs\n {\n Bucket = example.Name,\n FilenamePrefix = \"pre-\",\n FilenameSuffix = \"-_91980\",\n FilenameDatetimeFormat = \"YYYY-MM-DD/hh_mm_ssZ\",\n MaxBytes = 1000,\n MaxDuration = \"300s\",\n MaxMessages = 1000,\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n example,\n admin,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := storage.NewBucket(ctx, \"example\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"example-bucket\"),\n\t\t\tLocation: 
pulumi.String(\"US\"),\n\t\t\tUniformBucketLevelAccess: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texampleTopic, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tadmin, err := storage.NewBucketIAMMember(ctx, \"admin\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: example.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.admin\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: exampleTopic.ID(),\n\t\t\tCloudStorageConfig: \u0026pubsub.SubscriptionCloudStorageConfigArgs{\n\t\t\t\tBucket: example.Name,\n\t\t\t\tFilenamePrefix: pulumi.String(\"pre-\"),\n\t\t\t\tFilenameSuffix: pulumi.String(\"-_91980\"),\n\t\t\t\tFilenameDatetimeFormat: pulumi.String(\"YYYY-MM-DD/hh_mm_ssZ\"),\n\t\t\t\tMaxBytes: pulumi.Int(1000),\n\t\t\t\tMaxDuration: pulumi.String(\"300s\"),\n\t\t\t\tMaxMessages: pulumi.Int(1000),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\texample,\n\t\t\tadmin,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.storage.BucketIAMMember;\nimport com.pulumi.gcp.storage.BucketIAMMemberArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionCloudStorageConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Bucket(\"example\", BucketArgs.builder()\n .name(\"example-bucket\")\n .location(\"US\")\n .uniformBucketLevelAccess(true)\n .build());\n\n var exampleTopic = new Topic(\"exampleTopic\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var admin = new BucketIAMMember(\"admin\", BucketIAMMemberArgs.builder()\n .bucket(example.name())\n .role(\"roles/storage.admin\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(exampleTopic.id())\n .cloudStorageConfig(SubscriptionCloudStorageConfigArgs.builder()\n .bucket(example.name())\n .filenamePrefix(\"pre-\")\n .filenameSuffix(\"-_91980\")\n .filenameDatetimeFormat(\"YYYY-MM-DD/hh_mm_ssZ\")\n 
.maxBytes(1000)\n .maxDuration(\"300s\")\n .maxMessages(1000)\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n example,\n admin)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:storage:Bucket\n properties:\n name: example-bucket\n location: US\n uniformBucketLevelAccess: true\n exampleTopic:\n type: gcp:pubsub:Topic\n name: example\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${exampleTopic.id}\n cloudStorageConfig:\n bucket: ${example.name}\n filenamePrefix: pre-\n filenameSuffix: -_91980\n filenameDatetimeFormat: YYYY-MM-DD/hh_mm_ssZ\n maxBytes: 1000\n maxDuration: 300s\n maxMessages: 1000\n options:\n dependson:\n - ${example}\n - ${admin}\n admin:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${example.name}\n role: roles/storage.admin\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Cloudstorage Avro\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst example = new gcp.storage.Bucket(\"example\", {\n name: \"example-bucket\",\n location: \"US\",\n uniformBucketLevelAccess: true,\n});\nconst exampleTopic = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst project = gcp.organizations.getProject({});\nconst admin = new gcp.storage.BucketIAMMember(\"admin\", {\n bucket: example.name,\n role: \"roles/storage.admin\",\n member: project.then(project =\u003e `serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com`),\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: exampleTopic.id,\n cloudStorageConfig: {\n bucket: example.name,\n filenamePrefix: \"pre-\",\n filenameSuffix: \"-_37118\",\n filenameDatetimeFormat: \"YYYY-MM-DD/hh_mm_ssZ\",\n maxBytes: 1000,\n maxDuration: \"300s\",\n maxMessages: 1000,\n avroConfig: {\n writeMetadata: true,\n useTopicSchema: true,\n },\n },\n}, {\n dependsOn: [\n example,\n admin,\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.storage.Bucket(\"example\",\n name=\"example-bucket\",\n location=\"US\",\n uniform_bucket_level_access=True)\nexample_topic = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nproject = gcp.organizations.get_project()\nadmin = gcp.storage.BucketIAMMember(\"admin\",\n bucket=example.name,\n role=\"roles/storage.admin\",\n member=f\"serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\")\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example_topic.id,\n cloud_storage_config={\n \"bucket\": example.name,\n \"filename_prefix\": \"pre-\",\n \"filename_suffix\": \"-_37118\",\n \"filename_datetime_format\": \"YYYY-MM-DD/hh_mm_ssZ\",\n \"max_bytes\": 1000,\n \"max_duration\": \"300s\",\n \"max_messages\": 1000,\n \"avro_config\": {\n \"write_metadata\": True,\n \"use_topic_schema\": True,\n },\n },\n opts = pulumi.ResourceOptions(depends_on=[\n example,\n admin,\n ]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var 
example = new Gcp.Storage.Bucket(\"example\", new()\n {\n Name = \"example-bucket\",\n Location = \"US\",\n UniformBucketLevelAccess = true,\n });\n\n var exampleTopic = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n var admin = new Gcp.Storage.BucketIAMMember(\"admin\", new()\n {\n Bucket = example.Name,\n Role = \"roles/storage.admin\",\n Member = $\"serviceAccount:service-{project.Apply(getProjectResult =\u003e getProjectResult.Number)}@gcp-sa-pubsub.iam.gserviceaccount.com\",\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = exampleTopic.Id,\n CloudStorageConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigArgs\n {\n Bucket = example.Name,\n FilenamePrefix = \"pre-\",\n FilenameSuffix = \"-_37118\",\n FilenameDatetimeFormat = \"YYYY-MM-DD/hh_mm_ssZ\",\n MaxBytes = 1000,\n MaxDuration = \"300s\",\n MaxMessages = 1000,\n AvroConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigAvroConfigArgs\n {\n WriteMetadata = true,\n UseTopicSchema = true,\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n example,\n admin,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := storage.NewBucket(ctx, \"example\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"example-bucket\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tUniformBucketLevelAccess: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texampleTopic, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproject, err := organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tadmin, err := storage.NewBucketIAMMember(ctx, \"admin\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: example.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.admin\"),\n\t\t\tMember: pulumi.Sprintf(\"serviceAccount:service-%v@gcp-sa-pubsub.iam.gserviceaccount.com\", project.Number),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: exampleTopic.ID(),\n\t\t\tCloudStorageConfig: \u0026pubsub.SubscriptionCloudStorageConfigArgs{\n\t\t\t\tBucket: example.Name,\n\t\t\t\tFilenamePrefix: pulumi.String(\"pre-\"),\n\t\t\t\tFilenameSuffix: pulumi.String(\"-_37118\"),\n\t\t\t\tFilenameDatetimeFormat: pulumi.String(\"YYYY-MM-DD/hh_mm_ssZ\"),\n\t\t\t\tMaxBytes: pulumi.Int(1000),\n\t\t\t\tMaxDuration: pulumi.String(\"300s\"),\n\t\t\t\tMaxMessages: pulumi.Int(1000),\n\t\t\t\tAvroConfig: \u0026pubsub.SubscriptionCloudStorageConfigAvroConfigArgs{\n\t\t\t\t\tWriteMetadata: pulumi.Bool(true),\n\t\t\t\t\tUseTopicSchema: pulumi.Bool(true),\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\texample,\n\t\t\tadmin,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport 
com.pulumi.core.Output;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.gcp.storage.BucketIAMMember;\nimport com.pulumi.gcp.storage.BucketIAMMemberArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionCloudStorageConfigArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionCloudStorageConfigAvroConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Bucket(\"example\", BucketArgs.builder()\n .name(\"example-bucket\")\n .location(\"US\")\n .uniformBucketLevelAccess(true)\n .build());\n\n var exampleTopic = new Topic(\"exampleTopic\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n var admin = new BucketIAMMember(\"admin\", BucketIAMMemberArgs.builder()\n .bucket(example.name())\n .role(\"roles/storage.admin\")\n .member(String.format(\"serviceAccount:service-%s@gcp-sa-pubsub.iam.gserviceaccount.com\", project.applyValue(getProjectResult -\u003e getProjectResult.number())))\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(exampleTopic.id())\n .cloudStorageConfig(SubscriptionCloudStorageConfigArgs.builder()\n .bucket(example.name())\n .filenamePrefix(\"pre-\")\n .filenameSuffix(\"-_37118\")\n .filenameDatetimeFormat(\"YYYY-MM-DD/hh_mm_ssZ\")\n .maxBytes(1000)\n .maxDuration(\"300s\")\n .maxMessages(1000)\n .avroConfig(SubscriptionCloudStorageConfigAvroConfigArgs.builder()\n .writeMetadata(true)\n .useTopicSchema(true)\n .build())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n example,\n admin)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:storage:Bucket\n properties:\n name: example-bucket\n location: US\n uniformBucketLevelAccess: true\n exampleTopic:\n type: gcp:pubsub:Topic\n name: example\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${exampleTopic.id}\n cloudStorageConfig:\n bucket: ${example.name}\n filenamePrefix: pre-\n filenameSuffix: -_37118\n filenameDatetimeFormat: YYYY-MM-DD/hh_mm_ssZ\n maxBytes: 1000\n maxDuration: 300s\n maxMessages: 1000\n avroConfig:\n writeMetadata: true\n useTopicSchema: true\n options:\n dependson:\n - ${example}\n - ${admin}\n admin:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${example.name}\n role: roles/storage.admin\n member: serviceAccount:service-${project.number}@gcp-sa-pubsub.iam.gserviceaccount.com\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Pubsub Subscription Push Cloudstorage Service Account\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from 
\"@pulumi/gcp\";\n\nconst example = new gcp.storage.Bucket(\"example\", {\n name: \"example-bucket\",\n location: \"US\",\n uniformBucketLevelAccess: true,\n});\nconst exampleTopic = new gcp.pubsub.Topic(\"example\", {name: \"example-topic\"});\nconst storageWriteServiceAccount = new gcp.serviceaccount.Account(\"storage_write_service_account\", {\n accountId: \"example-stw\",\n displayName: \"Storage Write Service Account\",\n});\nconst admin = new gcp.storage.BucketIAMMember(\"admin\", {\n bucket: example.name,\n role: \"roles/storage.admin\",\n member: pulumi.interpolate`serviceAccount:${storageWriteServiceAccount.email}`,\n});\nconst exampleSubscription = new gcp.pubsub.Subscription(\"example\", {\n name: \"example-subscription\",\n topic: exampleTopic.id,\n cloudStorageConfig: {\n bucket: example.name,\n filenamePrefix: \"pre-\",\n filenameSuffix: \"-_80332\",\n filenameDatetimeFormat: \"YYYY-MM-DD/hh_mm_ssZ\",\n maxBytes: 1000,\n maxDuration: \"300s\",\n serviceAccountEmail: storageWriteServiceAccount.email,\n },\n}, {\n dependsOn: [\n storageWriteServiceAccount,\n example,\n admin,\n ],\n});\nconst project = gcp.organizations.getProject({});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nexample = gcp.storage.Bucket(\"example\",\n name=\"example-bucket\",\n location=\"US\",\n uniform_bucket_level_access=True)\nexample_topic = gcp.pubsub.Topic(\"example\", name=\"example-topic\")\nstorage_write_service_account = gcp.serviceaccount.Account(\"storage_write_service_account\",\n account_id=\"example-stw\",\n display_name=\"Storage Write Service Account\")\nadmin = gcp.storage.BucketIAMMember(\"admin\",\n bucket=example.name,\n role=\"roles/storage.admin\",\n member=storage_write_service_account.email.apply(lambda email: f\"serviceAccount:{email}\"))\nexample_subscription = gcp.pubsub.Subscription(\"example\",\n name=\"example-subscription\",\n topic=example_topic.id,\n cloud_storage_config={\n \"bucket\": example.name,\n \"filename_prefix\": \"pre-\",\n \"filename_suffix\": \"-_80332\",\n \"filename_datetime_format\": \"YYYY-MM-DD/hh_mm_ssZ\",\n \"max_bytes\": 1000,\n \"max_duration\": \"300s\",\n \"service_account_email\": storage_write_service_account.email,\n },\n opts = pulumi.ResourceOptions(depends_on=[\n storage_write_service_account,\n example,\n admin,\n ]))\nproject = gcp.organizations.get_project()\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var example = new Gcp.Storage.Bucket(\"example\", new()\n {\n Name = \"example-bucket\",\n Location = \"US\",\n UniformBucketLevelAccess = true,\n });\n\n var exampleTopic = new Gcp.PubSub.Topic(\"example\", new()\n {\n Name = \"example-topic\",\n });\n\n var storageWriteServiceAccount = new Gcp.ServiceAccount.Account(\"storage_write_service_account\", new()\n {\n AccountId = \"example-stw\",\n DisplayName = \"Storage Write Service Account\",\n });\n\n var admin = new Gcp.Storage.BucketIAMMember(\"admin\", new()\n {\n Bucket = example.Name,\n Role = \"roles/storage.admin\",\n Member = storageWriteServiceAccount.Email.Apply(email =\u003e $\"serviceAccount:{email}\"),\n });\n\n var exampleSubscription = new Gcp.PubSub.Subscription(\"example\", new()\n {\n Name = \"example-subscription\",\n Topic = exampleTopic.Id,\n CloudStorageConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigArgs\n {\n Bucket = example.Name,\n FilenamePrefix = \"pre-\",\n FilenameSuffix = \"-_80332\",\n FilenameDatetimeFormat = 
\"YYYY-MM-DD/hh_mm_ssZ\",\n MaxBytes = 1000,\n MaxDuration = \"300s\",\n ServiceAccountEmail = storageWriteServiceAccount.Email,\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n storageWriteServiceAccount,\n example,\n admin,\n },\n });\n\n var project = Gcp.Organizations.GetProject.Invoke();\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/pubsub\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\texample, err := storage.NewBucket(ctx, \"example\", \u0026storage.BucketArgs{\n\t\t\tName: pulumi.String(\"example-bucket\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tUniformBucketLevelAccess: pulumi.Bool(true),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\texampleTopic, err := pubsub.NewTopic(ctx, \"example\", \u0026pubsub.TopicArgs{\n\t\t\tName: pulumi.String(\"example-topic\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstorageWriteServiceAccount, err := serviceaccount.NewAccount(ctx, \"storage_write_service_account\", \u0026serviceaccount.AccountArgs{\n\t\t\tAccountId: pulumi.String(\"example-stw\"),\n\t\t\tDisplayName: pulumi.String(\"Storage Write Service Account\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tadmin, err := storage.NewBucketIAMMember(ctx, \"admin\", \u0026storage.BucketIAMMemberArgs{\n\t\t\tBucket: example.Name,\n\t\t\tRole: pulumi.String(\"roles/storage.admin\"),\n\t\t\tMember: storageWriteServiceAccount.Email.ApplyT(func(email string) (string, error) {\n\t\t\t\treturn fmt.Sprintf(\"serviceAccount:%v\", email), nil\n\t\t\t}).(pulumi.StringOutput),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = pubsub.NewSubscription(ctx, \"example\", \u0026pubsub.SubscriptionArgs{\n\t\t\tName: pulumi.String(\"example-subscription\"),\n\t\t\tTopic: exampleTopic.ID(),\n\t\t\tCloudStorageConfig: \u0026pubsub.SubscriptionCloudStorageConfigArgs{\n\t\t\t\tBucket: example.Name,\n\t\t\t\tFilenamePrefix: pulumi.String(\"pre-\"),\n\t\t\t\tFilenameSuffix: pulumi.String(\"-_80332\"),\n\t\t\t\tFilenameDatetimeFormat: pulumi.String(\"YYYY-MM-DD/hh_mm_ssZ\"),\n\t\t\t\tMaxBytes: pulumi.Int(1000),\n\t\t\t\tMaxDuration: pulumi.String(\"300s\"),\n\t\t\t\tServiceAccountEmail: storageWriteServiceAccount.Email,\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\tstorageWriteServiceAccount,\n\t\t\texample,\n\t\t\tadmin,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = organizations.LookupProject(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.storage.Bucket;\nimport com.pulumi.gcp.storage.BucketArgs;\nimport com.pulumi.gcp.pubsub.Topic;\nimport com.pulumi.gcp.pubsub.TopicArgs;\nimport com.pulumi.gcp.serviceaccount.Account;\nimport com.pulumi.gcp.serviceaccount.AccountArgs;\nimport com.pulumi.gcp.storage.BucketIAMMember;\nimport com.pulumi.gcp.storage.BucketIAMMemberArgs;\nimport com.pulumi.gcp.pubsub.Subscription;\nimport com.pulumi.gcp.pubsub.SubscriptionArgs;\nimport com.pulumi.gcp.pubsub.inputs.SubscriptionCloudStorageConfigArgs;\nimport com.pulumi.gcp.organizations.OrganizationsFunctions;\nimport 
com.pulumi.gcp.organizations.inputs.GetProjectArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var example = new Bucket(\"example\", BucketArgs.builder()\n .name(\"example-bucket\")\n .location(\"US\")\n .uniformBucketLevelAccess(true)\n .build());\n\n var exampleTopic = new Topic(\"exampleTopic\", TopicArgs.builder()\n .name(\"example-topic\")\n .build());\n\n var storageWriteServiceAccount = new Account(\"storageWriteServiceAccount\", AccountArgs.builder()\n .accountId(\"example-stw\")\n .displayName(\"Storage Write Service Account\")\n .build());\n\n var admin = new BucketIAMMember(\"admin\", BucketIAMMemberArgs.builder()\n .bucket(example.name())\n .role(\"roles/storage.admin\")\n .member(storageWriteServiceAccount.email().applyValue(email -\u003e String.format(\"serviceAccount:%s\", email)))\n .build());\n\n var exampleSubscription = new Subscription(\"exampleSubscription\", SubscriptionArgs.builder()\n .name(\"example-subscription\")\n .topic(exampleTopic.id())\n .cloudStorageConfig(SubscriptionCloudStorageConfigArgs.builder()\n .bucket(example.name())\n .filenamePrefix(\"pre-\")\n .filenameSuffix(\"-_80332\")\n .filenameDatetimeFormat(\"YYYY-MM-DD/hh_mm_ssZ\")\n .maxBytes(1000)\n .maxDuration(\"300s\")\n .serviceAccountEmail(storageWriteServiceAccount.email())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn( \n storageWriteServiceAccount,\n example,\n admin)\n .build());\n\n final var project = OrganizationsFunctions.getProject();\n\n }\n}\n```\n```yaml\nresources:\n example:\n type: gcp:storage:Bucket\n properties:\n name: example-bucket\n location: US\n uniformBucketLevelAccess: true\n exampleTopic:\n type: gcp:pubsub:Topic\n name: example\n properties:\n name: example-topic\n exampleSubscription:\n type: gcp:pubsub:Subscription\n name: example\n properties:\n name: example-subscription\n topic: ${exampleTopic.id}\n cloudStorageConfig:\n bucket: ${example.name}\n filenamePrefix: pre-\n filenameSuffix: -_80332\n filenameDatetimeFormat: YYYY-MM-DD/hh_mm_ssZ\n maxBytes: 1000\n maxDuration: 300s\n serviceAccountEmail: ${storageWriteServiceAccount.email}\n options:\n dependson:\n - ${storageWriteServiceAccount}\n - ${example}\n - ${admin}\n storageWriteServiceAccount:\n type: gcp:serviceaccount:Account\n name: storage_write_service_account\n properties:\n accountId: example-stw\n displayName: Storage Write Service Account\n admin:\n type: gcp:storage:BucketIAMMember\n properties:\n bucket: ${example.name}\n role: roles/storage.admin\n member: serviceAccount:${storageWriteServiceAccount.email}\nvariables:\n project:\n fn::invoke:\n Function: gcp:organizations:getProject\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nSubscription can be imported using any of these accepted formats:\n\n* `projects/{{project}}/subscriptions/{{name}}`\n\n* `{{project}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, Subscription can be imported using one of the formats above. 
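The two usage examples above configure either Avro output or a user-managed writer service account; as far as the schema goes these `cloudStorageConfig` fields appear independent and can be combined. A minimal TypeScript sketch (editorial addition, not part of the generated documentation; all names are placeholders):

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Sketch only: combines the Avro settings from the first example with the
// dedicated writer service account from the second. Placeholder names.
const topic = new gcp.pubsub.Topic("sketch-topic", { name: "sketch-topic" });
const bucket = new gcp.storage.Bucket("sketch-bucket", {
    name: "sketch-bucket",
    location: "US",
    uniformBucketLevelAccess: true,
});
const writer = new gcp.serviceaccount.Account("sketch-writer", {
    accountId: "sketch-writer",
    displayName: "Storage Write Service Account",
});
// Grant the writer account access to the destination bucket, mirroring the
// "admin" grant in the examples above.
const grant = new gcp.storage.BucketIAMMember("sketch-grant", {
    bucket: bucket.name,
    role: "roles/storage.admin",
    member: pulumi.interpolate`serviceAccount:${writer.email}`,
});
const sub = new gcp.pubsub.Subscription("sketch-sub", {
    name: "sketch-sub",
    topic: topic.id,
    cloudStorageConfig: {
        bucket: bucket.name,
        filenameDatetimeFormat: "YYYY-MM-DD/hh_mm_ssZ",
        maxDuration: "300s",
        // Push as the dedicated writer instead of the Pub/Sub service agent...
        serviceAccountEmail: writer.email,
        // ...and write Avro files with message metadata rather than raw bytes.
        avroConfig: { writeMetadata: true },
    },
}, { dependsOn: [grant] });
```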
For example:\n\n```sh\n$ pulumi import gcp:pubsub/subscription:Subscription default projects/{{project}}/subscriptions/{{name}}\n```\n\n```sh\n$ pulumi import gcp:pubsub/subscription:Subscription default {{project}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:pubsub/subscription:Subscription default {{name}}\n```\n\n", "properties": { "ackDeadlineSeconds": { "type": "integer", @@ -232436,7 +234039,7 @@ } }, "gcp:redis/cluster:Cluster": { - "description": "A Google Cloud Redis Cluster instance.\n\n\nTo get more information about Cluster, see:\n\n* [API documentation](https://cloud.google.com/memorystore/docs/cluster/reference/rest/v1/projects.locations.clusters)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/memorystore/docs/cluster/)\n\n## Example Usage\n\n### Redis Cluster Ha\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst producerNet = new gcp.compute.Network(\"producer_net\", {\n name: \"mynetwork\",\n autoCreateSubnetworks: false,\n});\nconst producerSubnet = new gcp.compute.Subnetwork(\"producer_subnet\", {\n name: \"mysubnet\",\n ipCidrRange: \"10.0.0.248/29\",\n region: \"us-central1\",\n network: producerNet.id,\n});\nconst _default = new gcp.networkconnectivity.ServiceConnectionPolicy(\"default\", {\n name: \"mypolicy\",\n location: \"us-central1\",\n serviceClass: \"gcp-memorystore-redis\",\n description: \"my basic service connection policy\",\n network: producerNet.id,\n pscConfig: {\n subnetworks: [producerSubnet.id],\n },\n});\nconst cluster_ha = new gcp.redis.Cluster(\"cluster-ha\", {\n name: \"ha-cluster\",\n shardCount: 3,\n pscConfigs: [{\n network: producerNet.id,\n }],\n region: \"us-central1\",\n replicaCount: 1,\n nodeType: \"REDIS_SHARED_CORE_NANO\",\n transitEncryptionMode: \"TRANSIT_ENCRYPTION_MODE_DISABLED\",\n authorizationMode: \"AUTH_MODE_DISABLED\",\n redisConfigs: {\n \"maxmemory-policy\": \"volatile-ttl\",\n },\n deletionProtectionEnabled: true,\n zoneDistributionConfig: {\n mode: \"MULTI_ZONE\",\n },\n}, {\n dependsOn: [_default],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproducer_net = gcp.compute.Network(\"producer_net\",\n name=\"mynetwork\",\n auto_create_subnetworks=False)\nproducer_subnet = gcp.compute.Subnetwork(\"producer_subnet\",\n name=\"mysubnet\",\n ip_cidr_range=\"10.0.0.248/29\",\n region=\"us-central1\",\n network=producer_net.id)\ndefault = gcp.networkconnectivity.ServiceConnectionPolicy(\"default\",\n name=\"mypolicy\",\n location=\"us-central1\",\n service_class=\"gcp-memorystore-redis\",\n description=\"my basic service connection policy\",\n network=producer_net.id,\n psc_config={\n \"subnetworks\": [producer_subnet.id],\n })\ncluster_ha = gcp.redis.Cluster(\"cluster-ha\",\n name=\"ha-cluster\",\n shard_count=3,\n psc_configs=[{\n \"network\": producer_net.id,\n }],\n region=\"us-central1\",\n replica_count=1,\n node_type=\"REDIS_SHARED_CORE_NANO\",\n transit_encryption_mode=\"TRANSIT_ENCRYPTION_MODE_DISABLED\",\n authorization_mode=\"AUTH_MODE_DISABLED\",\n redis_configs={\n \"maxmemory-policy\": \"volatile-ttl\",\n },\n deletion_protection_enabled=True,\n zone_distribution_config={\n \"mode\": \"MULTI_ZONE\",\n },\n opts = pulumi.ResourceOptions(depends_on=[default]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var producerNet = new 
Gcp.Compute.Network(\"producer_net\", new()\n {\n Name = \"mynetwork\",\n AutoCreateSubnetworks = false,\n });\n\n var producerSubnet = new Gcp.Compute.Subnetwork(\"producer_subnet\", new()\n {\n Name = \"mysubnet\",\n IpCidrRange = \"10.0.0.248/29\",\n Region = \"us-central1\",\n Network = producerNet.Id,\n });\n\n var @default = new Gcp.NetworkConnectivity.ServiceConnectionPolicy(\"default\", new()\n {\n Name = \"mypolicy\",\n Location = \"us-central1\",\n ServiceClass = \"gcp-memorystore-redis\",\n Description = \"my basic service connection policy\",\n Network = producerNet.Id,\n PscConfig = new Gcp.NetworkConnectivity.Inputs.ServiceConnectionPolicyPscConfigArgs\n {\n Subnetworks = new[]\n {\n producerSubnet.Id,\n },\n },\n });\n\n var cluster_ha = new Gcp.Redis.Cluster(\"cluster-ha\", new()\n {\n Name = \"ha-cluster\",\n ShardCount = 3,\n PscConfigs = new[]\n {\n new Gcp.Redis.Inputs.ClusterPscConfigArgs\n {\n Network = producerNet.Id,\n },\n },\n Region = \"us-central1\",\n ReplicaCount = 1,\n NodeType = \"REDIS_SHARED_CORE_NANO\",\n TransitEncryptionMode = \"TRANSIT_ENCRYPTION_MODE_DISABLED\",\n AuthorizationMode = \"AUTH_MODE_DISABLED\",\n RedisConfigs = \n {\n { \"maxmemory-policy\", \"volatile-ttl\" },\n },\n DeletionProtectionEnabled = true,\n ZoneDistributionConfig = new Gcp.Redis.Inputs.ClusterZoneDistributionConfigArgs\n {\n Mode = \"MULTI_ZONE\",\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n @default,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkconnectivity\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/redis\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproducerNet, err := compute.NewNetwork(ctx, \"producer_net\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"mynetwork\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproducerSubnet, err := compute.NewSubnetwork(ctx, \"producer_subnet\", \u0026compute.SubnetworkArgs{\n\t\t\tName: pulumi.String(\"mysubnet\"),\n\t\t\tIpCidrRange: pulumi.String(\"10.0.0.248/29\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNetwork: producerNet.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networkconnectivity.NewServiceConnectionPolicy(ctx, \"default\", \u0026networkconnectivity.ServiceConnectionPolicyArgs{\n\t\t\tName: pulumi.String(\"mypolicy\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tServiceClass: pulumi.String(\"gcp-memorystore-redis\"),\n\t\t\tDescription: pulumi.String(\"my basic service connection policy\"),\n\t\t\tNetwork: producerNet.ID(),\n\t\t\tPscConfig: \u0026networkconnectivity.ServiceConnectionPolicyPscConfigArgs{\n\t\t\t\tSubnetworks: pulumi.StringArray{\n\t\t\t\t\tproducerSubnet.ID(),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = redis.NewCluster(ctx, \"cluster-ha\", \u0026redis.ClusterArgs{\n\t\t\tName: pulumi.String(\"ha-cluster\"),\n\t\t\tShardCount: pulumi.Int(3),\n\t\t\tPscConfigs: redis.ClusterPscConfigArray{\n\t\t\t\t\u0026redis.ClusterPscConfigArgs{\n\t\t\t\t\tNetwork: producerNet.ID(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tReplicaCount: pulumi.Int(1),\n\t\t\tNodeType: pulumi.String(\"REDIS_SHARED_CORE_NANO\"),\n\t\t\tTransitEncryptionMode: 
pulumi.String(\"TRANSIT_ENCRYPTION_MODE_DISABLED\"),\n\t\t\tAuthorizationMode: pulumi.String(\"AUTH_MODE_DISABLED\"),\n\t\t\tRedisConfigs: pulumi.StringMap{\n\t\t\t\t\"maxmemory-policy\": pulumi.String(\"volatile-ttl\"),\n\t\t\t},\n\t\t\tDeletionProtectionEnabled: pulumi.Bool(true),\n\t\t\tZoneDistributionConfig: \u0026redis.ClusterZoneDistributionConfigArgs{\n\t\t\t\tMode: pulumi.String(\"MULTI_ZONE\"),\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\t_default,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.Subnetwork;\nimport com.pulumi.gcp.compute.SubnetworkArgs;\nimport com.pulumi.gcp.networkconnectivity.ServiceConnectionPolicy;\nimport com.pulumi.gcp.networkconnectivity.ServiceConnectionPolicyArgs;\nimport com.pulumi.gcp.networkconnectivity.inputs.ServiceConnectionPolicyPscConfigArgs;\nimport com.pulumi.gcp.redis.Cluster;\nimport com.pulumi.gcp.redis.ClusterArgs;\nimport com.pulumi.gcp.redis.inputs.ClusterPscConfigArgs;\nimport com.pulumi.gcp.redis.inputs.ClusterZoneDistributionConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var producerNet = new Network(\"producerNet\", NetworkArgs.builder()\n .name(\"mynetwork\")\n .autoCreateSubnetworks(false)\n .build());\n\n var producerSubnet = new Subnetwork(\"producerSubnet\", SubnetworkArgs.builder()\n .name(\"mysubnet\")\n .ipCidrRange(\"10.0.0.248/29\")\n .region(\"us-central1\")\n .network(producerNet.id())\n .build());\n\n var default_ = new ServiceConnectionPolicy(\"default\", ServiceConnectionPolicyArgs.builder()\n .name(\"mypolicy\")\n .location(\"us-central1\")\n .serviceClass(\"gcp-memorystore-redis\")\n .description(\"my basic service connection policy\")\n .network(producerNet.id())\n .pscConfig(ServiceConnectionPolicyPscConfigArgs.builder()\n .subnetworks(producerSubnet.id())\n .build())\n .build());\n\n var cluster_ha = new Cluster(\"cluster-ha\", ClusterArgs.builder()\n .name(\"ha-cluster\")\n .shardCount(3)\n .pscConfigs(ClusterPscConfigArgs.builder()\n .network(producerNet.id())\n .build())\n .region(\"us-central1\")\n .replicaCount(1)\n .nodeType(\"REDIS_SHARED_CORE_NANO\")\n .transitEncryptionMode(\"TRANSIT_ENCRYPTION_MODE_DISABLED\")\n .authorizationMode(\"AUTH_MODE_DISABLED\")\n .redisConfigs(Map.of(\"maxmemory-policy\", \"volatile-ttl\"))\n .deletionProtectionEnabled(true)\n .zoneDistributionConfig(ClusterZoneDistributionConfigArgs.builder()\n .mode(\"MULTI_ZONE\")\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(default_)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster-ha:\n type: gcp:redis:Cluster\n properties:\n name: ha-cluster\n shardCount: 3\n pscConfigs:\n - network: ${producerNet.id}\n region: us-central1\n replicaCount: 1\n nodeType: REDIS_SHARED_CORE_NANO\n transitEncryptionMode: TRANSIT_ENCRYPTION_MODE_DISABLED\n authorizationMode: AUTH_MODE_DISABLED\n redisConfigs:\n maxmemory-policy: volatile-ttl\n deletionProtectionEnabled: true\n zoneDistributionConfig:\n mode: MULTI_ZONE\n options:\n 
dependson:\n - ${default}\n default:\n type: gcp:networkconnectivity:ServiceConnectionPolicy\n properties:\n name: mypolicy\n location: us-central1\n serviceClass: gcp-memorystore-redis\n description: my basic service connection policy\n network: ${producerNet.id}\n pscConfig:\n subnetworks:\n - ${producerSubnet.id}\n producerSubnet:\n type: gcp:compute:Subnetwork\n name: producer_subnet\n properties:\n name: mysubnet\n ipCidrRange: 10.0.0.248/29\n region: us-central1\n network: ${producerNet.id}\n producerNet:\n type: gcp:compute:Network\n name: producer_net\n properties:\n name: mynetwork\n autoCreateSubnetworks: false\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Redis Cluster Ha Single Zone\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst producerNet = new gcp.compute.Network(\"producer_net\", {\n name: \"mynetwork\",\n autoCreateSubnetworks: false,\n});\nconst producerSubnet = new gcp.compute.Subnetwork(\"producer_subnet\", {\n name: \"mysubnet\",\n ipCidrRange: \"10.0.0.248/29\",\n region: \"us-central1\",\n network: producerNet.id,\n});\nconst _default = new gcp.networkconnectivity.ServiceConnectionPolicy(\"default\", {\n name: \"mypolicy\",\n location: \"us-central1\",\n serviceClass: \"gcp-memorystore-redis\",\n description: \"my basic service connection policy\",\n network: producerNet.id,\n pscConfig: {\n subnetworks: [producerSubnet.id],\n },\n});\nconst cluster_ha_single_zone = new gcp.redis.Cluster(\"cluster-ha-single-zone\", {\n name: \"ha-cluster-single-zone\",\n shardCount: 3,\n pscConfigs: [{\n network: producerNet.id,\n }],\n region: \"us-central1\",\n zoneDistributionConfig: {\n mode: \"SINGLE_ZONE\",\n zone: \"us-central1-f\",\n },\n deletionProtectionEnabled: true,\n}, {\n dependsOn: [_default],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproducer_net = gcp.compute.Network(\"producer_net\",\n name=\"mynetwork\",\n auto_create_subnetworks=False)\nproducer_subnet = gcp.compute.Subnetwork(\"producer_subnet\",\n name=\"mysubnet\",\n ip_cidr_range=\"10.0.0.248/29\",\n region=\"us-central1\",\n network=producer_net.id)\ndefault = gcp.networkconnectivity.ServiceConnectionPolicy(\"default\",\n name=\"mypolicy\",\n location=\"us-central1\",\n service_class=\"gcp-memorystore-redis\",\n description=\"my basic service connection policy\",\n network=producer_net.id,\n psc_config={\n \"subnetworks\": [producer_subnet.id],\n })\ncluster_ha_single_zone = gcp.redis.Cluster(\"cluster-ha-single-zone\",\n name=\"ha-cluster-single-zone\",\n shard_count=3,\n psc_configs=[{\n \"network\": producer_net.id,\n }],\n region=\"us-central1\",\n zone_distribution_config={\n \"mode\": \"SINGLE_ZONE\",\n \"zone\": \"us-central1-f\",\n },\n deletion_protection_enabled=True,\n opts = pulumi.ResourceOptions(depends_on=[default]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var producerNet = new Gcp.Compute.Network(\"producer_net\", new()\n {\n Name = \"mynetwork\",\n AutoCreateSubnetworks = false,\n });\n\n var producerSubnet = new Gcp.Compute.Subnetwork(\"producer_subnet\", new()\n {\n Name = \"mysubnet\",\n IpCidrRange = \"10.0.0.248/29\",\n Region = \"us-central1\",\n Network = producerNet.Id,\n });\n\n var @default = new Gcp.NetworkConnectivity.ServiceConnectionPolicy(\"default\", new()\n {\n Name = \"mypolicy\",\n Location = \"us-central1\",\n 
ServiceClass = \"gcp-memorystore-redis\",\n Description = \"my basic service connection policy\",\n Network = producerNet.Id,\n PscConfig = new Gcp.NetworkConnectivity.Inputs.ServiceConnectionPolicyPscConfigArgs\n {\n Subnetworks = new[]\n {\n producerSubnet.Id,\n },\n },\n });\n\n var cluster_ha_single_zone = new Gcp.Redis.Cluster(\"cluster-ha-single-zone\", new()\n {\n Name = \"ha-cluster-single-zone\",\n ShardCount = 3,\n PscConfigs = new[]\n {\n new Gcp.Redis.Inputs.ClusterPscConfigArgs\n {\n Network = producerNet.Id,\n },\n },\n Region = \"us-central1\",\n ZoneDistributionConfig = new Gcp.Redis.Inputs.ClusterZoneDistributionConfigArgs\n {\n Mode = \"SINGLE_ZONE\",\n Zone = \"us-central1-f\",\n },\n DeletionProtectionEnabled = true,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n @default,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkconnectivity\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/redis\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproducerNet, err := compute.NewNetwork(ctx, \"producer_net\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"mynetwork\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproducerSubnet, err := compute.NewSubnetwork(ctx, \"producer_subnet\", \u0026compute.SubnetworkArgs{\n\t\t\tName: pulumi.String(\"mysubnet\"),\n\t\t\tIpCidrRange: pulumi.String(\"10.0.0.248/29\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNetwork: producerNet.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networkconnectivity.NewServiceConnectionPolicy(ctx, \"default\", \u0026networkconnectivity.ServiceConnectionPolicyArgs{\n\t\t\tName: pulumi.String(\"mypolicy\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tServiceClass: pulumi.String(\"gcp-memorystore-redis\"),\n\t\t\tDescription: pulumi.String(\"my basic service connection policy\"),\n\t\t\tNetwork: producerNet.ID(),\n\t\t\tPscConfig: \u0026networkconnectivity.ServiceConnectionPolicyPscConfigArgs{\n\t\t\t\tSubnetworks: pulumi.StringArray{\n\t\t\t\t\tproducerSubnet.ID(),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = redis.NewCluster(ctx, \"cluster-ha-single-zone\", \u0026redis.ClusterArgs{\n\t\t\tName: pulumi.String(\"ha-cluster-single-zone\"),\n\t\t\tShardCount: pulumi.Int(3),\n\t\t\tPscConfigs: redis.ClusterPscConfigArray{\n\t\t\t\t\u0026redis.ClusterPscConfigArgs{\n\t\t\t\t\tNetwork: producerNet.ID(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tZoneDistributionConfig: \u0026redis.ClusterZoneDistributionConfigArgs{\n\t\t\t\tMode: pulumi.String(\"SINGLE_ZONE\"),\n\t\t\t\tZone: pulumi.String(\"us-central1-f\"),\n\t\t\t},\n\t\t\tDeletionProtectionEnabled: pulumi.Bool(true),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\t_default,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.Subnetwork;\nimport com.pulumi.gcp.compute.SubnetworkArgs;\nimport com.pulumi.gcp.networkconnectivity.ServiceConnectionPolicy;\nimport 
com.pulumi.gcp.networkconnectivity.ServiceConnectionPolicyArgs;\nimport com.pulumi.gcp.networkconnectivity.inputs.ServiceConnectionPolicyPscConfigArgs;\nimport com.pulumi.gcp.redis.Cluster;\nimport com.pulumi.gcp.redis.ClusterArgs;\nimport com.pulumi.gcp.redis.inputs.ClusterPscConfigArgs;\nimport com.pulumi.gcp.redis.inputs.ClusterZoneDistributionConfigArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var producerNet = new Network(\"producerNet\", NetworkArgs.builder()\n .name(\"mynetwork\")\n .autoCreateSubnetworks(false)\n .build());\n\n var producerSubnet = new Subnetwork(\"producerSubnet\", SubnetworkArgs.builder()\n .name(\"mysubnet\")\n .ipCidrRange(\"10.0.0.248/29\")\n .region(\"us-central1\")\n .network(producerNet.id())\n .build());\n\n var default_ = new ServiceConnectionPolicy(\"default\", ServiceConnectionPolicyArgs.builder()\n .name(\"mypolicy\")\n .location(\"us-central1\")\n .serviceClass(\"gcp-memorystore-redis\")\n .description(\"my basic service connection policy\")\n .network(producerNet.id())\n .pscConfig(ServiceConnectionPolicyPscConfigArgs.builder()\n .subnetworks(producerSubnet.id())\n .build())\n .build());\n\n var cluster_ha_single_zone = new Cluster(\"cluster-ha-single-zone\", ClusterArgs.builder()\n .name(\"ha-cluster-single-zone\")\n .shardCount(3)\n .pscConfigs(ClusterPscConfigArgs.builder()\n .network(producerNet.id())\n .build())\n .region(\"us-central1\")\n .zoneDistributionConfig(ClusterZoneDistributionConfigArgs.builder()\n .mode(\"SINGLE_ZONE\")\n .zone(\"us-central1-f\")\n .build())\n .deletionProtectionEnabled(true)\n .build(), CustomResourceOptions.builder()\n .dependsOn(default_)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster-ha-single-zone:\n type: gcp:redis:Cluster\n properties:\n name: ha-cluster-single-zone\n shardCount: 3\n pscConfigs:\n - network: ${producerNet.id}\n region: us-central1\n zoneDistributionConfig:\n mode: SINGLE_ZONE\n zone: us-central1-f\n deletionProtectionEnabled: true\n options:\n dependson:\n - ${default}\n default:\n type: gcp:networkconnectivity:ServiceConnectionPolicy\n properties:\n name: mypolicy\n location: us-central1\n serviceClass: gcp-memorystore-redis\n description: my basic service connection policy\n network: ${producerNet.id}\n pscConfig:\n subnetworks:\n - ${producerSubnet.id}\n producerSubnet:\n type: gcp:compute:Subnetwork\n name: producer_subnet\n properties:\n name: mysubnet\n ipCidrRange: 10.0.0.248/29\n region: us-central1\n network: ${producerNet.id}\n producerNet:\n type: gcp:compute:Network\n name: producer_net\n properties:\n name: mynetwork\n autoCreateSubnetworks: false\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nCluster can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{region}}/clusters/{{name}}`\n\n* `{{project}}/{{region}}/{{name}}`\n\n* `{{region}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, Cluster can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:redis/cluster:Cluster default projects/{{project}}/locations/{{region}}/clusters/{{name}}\n```\n\n```sh\n$ pulumi import gcp:redis/cluster:Cluster default {{project}}/{{region}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:redis/cluster:Cluster default {{region}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:redis/cluster:Cluster default {{name}}\n```\n\n", + "description": "A Google Cloud Redis Cluster instance.\n\n\nTo get more information about Cluster, see:\n\n* [API documentation](https://cloud.google.com/memorystore/docs/cluster/reference/rest/v1/projects.locations.clusters)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/memorystore/docs/cluster/)\n\n## Example Usage\n\n### Redis Cluster Ha\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst producerNet = new gcp.compute.Network(\"producer_net\", {\n name: \"mynetwork\",\n autoCreateSubnetworks: false,\n});\nconst producerSubnet = new gcp.compute.Subnetwork(\"producer_subnet\", {\n name: \"mysubnet\",\n ipCidrRange: \"10.0.0.248/29\",\n region: \"us-central1\",\n network: producerNet.id,\n});\nconst _default = new gcp.networkconnectivity.ServiceConnectionPolicy(\"default\", {\n name: \"mypolicy\",\n location: \"us-central1\",\n serviceClass: \"gcp-memorystore-redis\",\n description: \"my basic service connection policy\",\n network: producerNet.id,\n pscConfig: {\n subnetworks: [producerSubnet.id],\n },\n});\nconst cluster_ha = new gcp.redis.Cluster(\"cluster-ha\", {\n name: \"ha-cluster\",\n shardCount: 3,\n pscConfigs: [{\n network: producerNet.id,\n }],\n region: \"us-central1\",\n replicaCount: 1,\n nodeType: \"REDIS_SHARED_CORE_NANO\",\n transitEncryptionMode: \"TRANSIT_ENCRYPTION_MODE_DISABLED\",\n authorizationMode: \"AUTH_MODE_DISABLED\",\n redisConfigs: {\n \"maxmemory-policy\": \"volatile-ttl\",\n },\n deletionProtectionEnabled: true,\n zoneDistributionConfig: {\n mode: \"MULTI_ZONE\",\n },\n maintenancePolicy: {\n weeklyMaintenanceWindows: [{\n day: \"MONDAY\",\n startTime: {\n hours: 1,\n minutes: 0,\n seconds: 0,\n nanos: 0,\n },\n }],\n },\n}, {\n dependsOn: [_default],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproducer_net = gcp.compute.Network(\"producer_net\",\n name=\"mynetwork\",\n auto_create_subnetworks=False)\nproducer_subnet = gcp.compute.Subnetwork(\"producer_subnet\",\n name=\"mysubnet\",\n ip_cidr_range=\"10.0.0.248/29\",\n region=\"us-central1\",\n network=producer_net.id)\ndefault = gcp.networkconnectivity.ServiceConnectionPolicy(\"default\",\n name=\"mypolicy\",\n location=\"us-central1\",\n service_class=\"gcp-memorystore-redis\",\n description=\"my basic service connection policy\",\n network=producer_net.id,\n psc_config={\n \"subnetworks\": [producer_subnet.id],\n })\ncluster_ha = gcp.redis.Cluster(\"cluster-ha\",\n name=\"ha-cluster\",\n shard_count=3,\n psc_configs=[{\n \"network\": producer_net.id,\n }],\n region=\"us-central1\",\n replica_count=1,\n node_type=\"REDIS_SHARED_CORE_NANO\",\n transit_encryption_mode=\"TRANSIT_ENCRYPTION_MODE_DISABLED\",\n authorization_mode=\"AUTH_MODE_DISABLED\",\n redis_configs={\n \"maxmemory-policy\": \"volatile-ttl\",\n },\n deletion_protection_enabled=True,\n zone_distribution_config={\n \"mode\": \"MULTI_ZONE\",\n },\n maintenance_policy={\n \"weekly_maintenance_windows\": [{\n \"day\": \"MONDAY\",\n \"start_time\": {\n \"hours\": 1,\n \"minutes\": 0,\n \"seconds\": 0,\n 
\"nanos\": 0,\n },\n }],\n },\n opts = pulumi.ResourceOptions(depends_on=[default]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var producerNet = new Gcp.Compute.Network(\"producer_net\", new()\n {\n Name = \"mynetwork\",\n AutoCreateSubnetworks = false,\n });\n\n var producerSubnet = new Gcp.Compute.Subnetwork(\"producer_subnet\", new()\n {\n Name = \"mysubnet\",\n IpCidrRange = \"10.0.0.248/29\",\n Region = \"us-central1\",\n Network = producerNet.Id,\n });\n\n var @default = new Gcp.NetworkConnectivity.ServiceConnectionPolicy(\"default\", new()\n {\n Name = \"mypolicy\",\n Location = \"us-central1\",\n ServiceClass = \"gcp-memorystore-redis\",\n Description = \"my basic service connection policy\",\n Network = producerNet.Id,\n PscConfig = new Gcp.NetworkConnectivity.Inputs.ServiceConnectionPolicyPscConfigArgs\n {\n Subnetworks = new[]\n {\n producerSubnet.Id,\n },\n },\n });\n\n var cluster_ha = new Gcp.Redis.Cluster(\"cluster-ha\", new()\n {\n Name = \"ha-cluster\",\n ShardCount = 3,\n PscConfigs = new[]\n {\n new Gcp.Redis.Inputs.ClusterPscConfigArgs\n {\n Network = producerNet.Id,\n },\n },\n Region = \"us-central1\",\n ReplicaCount = 1,\n NodeType = \"REDIS_SHARED_CORE_NANO\",\n TransitEncryptionMode = \"TRANSIT_ENCRYPTION_MODE_DISABLED\",\n AuthorizationMode = \"AUTH_MODE_DISABLED\",\n RedisConfigs = \n {\n { \"maxmemory-policy\", \"volatile-ttl\" },\n },\n DeletionProtectionEnabled = true,\n ZoneDistributionConfig = new Gcp.Redis.Inputs.ClusterZoneDistributionConfigArgs\n {\n Mode = \"MULTI_ZONE\",\n },\n MaintenancePolicy = new Gcp.Redis.Inputs.ClusterMaintenancePolicyArgs\n {\n WeeklyMaintenanceWindows = new[]\n {\n new Gcp.Redis.Inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs\n {\n Day = \"MONDAY\",\n StartTime = new Gcp.Redis.Inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs\n {\n Hours = 1,\n Minutes = 0,\n Seconds = 0,\n Nanos = 0,\n },\n },\n },\n },\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n @default,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkconnectivity\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/redis\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproducerNet, err := compute.NewNetwork(ctx, \"producer_net\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"mynetwork\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproducerSubnet, err := compute.NewSubnetwork(ctx, \"producer_subnet\", \u0026compute.SubnetworkArgs{\n\t\t\tName: pulumi.String(\"mysubnet\"),\n\t\t\tIpCidrRange: pulumi.String(\"10.0.0.248/29\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNetwork: producerNet.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networkconnectivity.NewServiceConnectionPolicy(ctx, \"default\", \u0026networkconnectivity.ServiceConnectionPolicyArgs{\n\t\t\tName: pulumi.String(\"mypolicy\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tServiceClass: pulumi.String(\"gcp-memorystore-redis\"),\n\t\t\tDescription: pulumi.String(\"my basic service connection policy\"),\n\t\t\tNetwork: producerNet.ID(),\n\t\t\tPscConfig: \u0026networkconnectivity.ServiceConnectionPolicyPscConfigArgs{\n\t\t\t\tSubnetworks: 
pulumi.StringArray{\n\t\t\t\t\tproducerSubnet.ID(),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = redis.NewCluster(ctx, \"cluster-ha\", \u0026redis.ClusterArgs{\n\t\t\tName: pulumi.String(\"ha-cluster\"),\n\t\t\tShardCount: pulumi.Int(3),\n\t\t\tPscConfigs: redis.ClusterPscConfigArray{\n\t\t\t\t\u0026redis.ClusterPscConfigArgs{\n\t\t\t\t\tNetwork: producerNet.ID(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tReplicaCount: pulumi.Int(1),\n\t\t\tNodeType: pulumi.String(\"REDIS_SHARED_CORE_NANO\"),\n\t\t\tTransitEncryptionMode: pulumi.String(\"TRANSIT_ENCRYPTION_MODE_DISABLED\"),\n\t\t\tAuthorizationMode: pulumi.String(\"AUTH_MODE_DISABLED\"),\n\t\t\tRedisConfigs: pulumi.StringMap{\n\t\t\t\t\"maxmemory-policy\": pulumi.String(\"volatile-ttl\"),\n\t\t\t},\n\t\t\tDeletionProtectionEnabled: pulumi.Bool(true),\n\t\t\tZoneDistributionConfig: \u0026redis.ClusterZoneDistributionConfigArgs{\n\t\t\t\tMode: pulumi.String(\"MULTI_ZONE\"),\n\t\t\t},\n\t\t\tMaintenancePolicy: \u0026redis.ClusterMaintenancePolicyArgs{\n\t\t\t\tWeeklyMaintenanceWindows: redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowArray{\n\t\t\t\t\t\u0026redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs{\n\t\t\t\t\t\tDay: pulumi.String(\"MONDAY\"),\n\t\t\t\t\t\tStartTime: \u0026redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs{\n\t\t\t\t\t\t\tHours: pulumi.Int(1),\n\t\t\t\t\t\t\tMinutes: pulumi.Int(0),\n\t\t\t\t\t\t\tSeconds: pulumi.Int(0),\n\t\t\t\t\t\t\tNanos: pulumi.Int(0),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\t_default,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.Subnetwork;\nimport com.pulumi.gcp.compute.SubnetworkArgs;\nimport com.pulumi.gcp.networkconnectivity.ServiceConnectionPolicy;\nimport com.pulumi.gcp.networkconnectivity.ServiceConnectionPolicyArgs;\nimport com.pulumi.gcp.networkconnectivity.inputs.ServiceConnectionPolicyPscConfigArgs;\nimport com.pulumi.gcp.redis.Cluster;\nimport com.pulumi.gcp.redis.ClusterArgs;\nimport com.pulumi.gcp.redis.inputs.ClusterPscConfigArgs;\nimport com.pulumi.gcp.redis.inputs.ClusterZoneDistributionConfigArgs;\nimport com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var producerNet = new Network(\"producerNet\", NetworkArgs.builder()\n .name(\"mynetwork\")\n .autoCreateSubnetworks(false)\n .build());\n\n var producerSubnet = new Subnetwork(\"producerSubnet\", SubnetworkArgs.builder()\n .name(\"mysubnet\")\n .ipCidrRange(\"10.0.0.248/29\")\n .region(\"us-central1\")\n .network(producerNet.id())\n .build());\n\n var default_ = new ServiceConnectionPolicy(\"default\", ServiceConnectionPolicyArgs.builder()\n .name(\"mypolicy\")\n .location(\"us-central1\")\n .serviceClass(\"gcp-memorystore-redis\")\n .description(\"my basic service connection policy\")\n .network(producerNet.id())\n 
.pscConfig(ServiceConnectionPolicyPscConfigArgs.builder()\n .subnetworks(producerSubnet.id())\n .build())\n .build());\n\n var cluster_ha = new Cluster(\"cluster-ha\", ClusterArgs.builder()\n .name(\"ha-cluster\")\n .shardCount(3)\n .pscConfigs(ClusterPscConfigArgs.builder()\n .network(producerNet.id())\n .build())\n .region(\"us-central1\")\n .replicaCount(1)\n .nodeType(\"REDIS_SHARED_CORE_NANO\")\n .transitEncryptionMode(\"TRANSIT_ENCRYPTION_MODE_DISABLED\")\n .authorizationMode(\"AUTH_MODE_DISABLED\")\n .redisConfigs(Map.of(\"maxmemory-policy\", \"volatile-ttl\"))\n .deletionProtectionEnabled(true)\n .zoneDistributionConfig(ClusterZoneDistributionConfigArgs.builder()\n .mode(\"MULTI_ZONE\")\n .build())\n .maintenancePolicy(ClusterMaintenancePolicyArgs.builder()\n .weeklyMaintenanceWindows(ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.builder()\n .day(\"MONDAY\")\n .startTime(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.builder()\n .hours(1)\n .minutes(0)\n .seconds(0)\n .nanos(0)\n .build())\n .build())\n .build())\n .build(), CustomResourceOptions.builder()\n .dependsOn(default_)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster-ha:\n type: gcp:redis:Cluster\n properties:\n name: ha-cluster\n shardCount: 3\n pscConfigs:\n - network: ${producerNet.id}\n region: us-central1\n replicaCount: 1\n nodeType: REDIS_SHARED_CORE_NANO\n transitEncryptionMode: TRANSIT_ENCRYPTION_MODE_DISABLED\n authorizationMode: AUTH_MODE_DISABLED\n redisConfigs:\n maxmemory-policy: volatile-ttl\n deletionProtectionEnabled: true\n zoneDistributionConfig:\n mode: MULTI_ZONE\n maintenancePolicy:\n weeklyMaintenanceWindows:\n - day: MONDAY\n startTime:\n hours: 1\n minutes: 0\n seconds: 0\n nanos: 0\n options:\n dependson:\n - ${default}\n default:\n type: gcp:networkconnectivity:ServiceConnectionPolicy\n properties:\n name: mypolicy\n location: us-central1\n serviceClass: gcp-memorystore-redis\n description: my basic service connection policy\n network: ${producerNet.id}\n pscConfig:\n subnetworks:\n - ${producerSubnet.id}\n producerSubnet:\n type: gcp:compute:Subnetwork\n name: producer_subnet\n properties:\n name: mysubnet\n ipCidrRange: 10.0.0.248/29\n region: us-central1\n network: ${producerNet.id}\n producerNet:\n type: gcp:compute:Network\n name: producer_net\n properties:\n name: mynetwork\n autoCreateSubnetworks: false\n```\n\u003c!--End PulumiCodeChooser --\u003e\n### Redis Cluster Ha Single Zone\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst producerNet = new gcp.compute.Network(\"producer_net\", {\n name: \"mynetwork\",\n autoCreateSubnetworks: false,\n});\nconst producerSubnet = new gcp.compute.Subnetwork(\"producer_subnet\", {\n name: \"mysubnet\",\n ipCidrRange: \"10.0.0.248/29\",\n region: \"us-central1\",\n network: producerNet.id,\n});\nconst _default = new gcp.networkconnectivity.ServiceConnectionPolicy(\"default\", {\n name: \"mypolicy\",\n location: \"us-central1\",\n serviceClass: \"gcp-memorystore-redis\",\n description: \"my basic service connection policy\",\n network: producerNet.id,\n pscConfig: {\n subnetworks: [producerSubnet.id],\n },\n});\nconst cluster_ha_single_zone = new gcp.redis.Cluster(\"cluster-ha-single-zone\", {\n name: \"ha-cluster-single-zone\",\n shardCount: 3,\n pscConfigs: [{\n network: producerNet.id,\n }],\n region: \"us-central1\",\n zoneDistributionConfig: {\n mode: \"SINGLE_ZONE\",\n zone: \"us-central1-f\",\n },\n 
maintenancePolicy: {\n weeklyMaintenanceWindows: [{\n day: \"MONDAY\",\n startTime: {\n hours: 1,\n minutes: 0,\n seconds: 0,\n nanos: 0,\n },\n }],\n },\n deletionProtectionEnabled: true,\n}, {\n dependsOn: [_default],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproducer_net = gcp.compute.Network(\"producer_net\",\n name=\"mynetwork\",\n auto_create_subnetworks=False)\nproducer_subnet = gcp.compute.Subnetwork(\"producer_subnet\",\n name=\"mysubnet\",\n ip_cidr_range=\"10.0.0.248/29\",\n region=\"us-central1\",\n network=producer_net.id)\ndefault = gcp.networkconnectivity.ServiceConnectionPolicy(\"default\",\n name=\"mypolicy\",\n location=\"us-central1\",\n service_class=\"gcp-memorystore-redis\",\n description=\"my basic service connection policy\",\n network=producer_net.id,\n psc_config={\n \"subnetworks\": [producer_subnet.id],\n })\ncluster_ha_single_zone = gcp.redis.Cluster(\"cluster-ha-single-zone\",\n name=\"ha-cluster-single-zone\",\n shard_count=3,\n psc_configs=[{\n \"network\": producer_net.id,\n }],\n region=\"us-central1\",\n zone_distribution_config={\n \"mode\": \"SINGLE_ZONE\",\n \"zone\": \"us-central1-f\",\n },\n maintenance_policy={\n \"weekly_maintenance_windows\": [{\n \"day\": \"MONDAY\",\n \"start_time\": {\n \"hours\": 1,\n \"minutes\": 0,\n \"seconds\": 0,\n \"nanos\": 0,\n },\n }],\n },\n deletion_protection_enabled=True,\n opts = pulumi.ResourceOptions(depends_on=[default]))\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var producerNet = new Gcp.Compute.Network(\"producer_net\", new()\n {\n Name = \"mynetwork\",\n AutoCreateSubnetworks = false,\n });\n\n var producerSubnet = new Gcp.Compute.Subnetwork(\"producer_subnet\", new()\n {\n Name = \"mysubnet\",\n IpCidrRange = \"10.0.0.248/29\",\n Region = \"us-central1\",\n Network = producerNet.Id,\n });\n\n var @default = new Gcp.NetworkConnectivity.ServiceConnectionPolicy(\"default\", new()\n {\n Name = \"mypolicy\",\n Location = \"us-central1\",\n ServiceClass = \"gcp-memorystore-redis\",\n Description = \"my basic service connection policy\",\n Network = producerNet.Id,\n PscConfig = new Gcp.NetworkConnectivity.Inputs.ServiceConnectionPolicyPscConfigArgs\n {\n Subnetworks = new[]\n {\n producerSubnet.Id,\n },\n },\n });\n\n var cluster_ha_single_zone = new Gcp.Redis.Cluster(\"cluster-ha-single-zone\", new()\n {\n Name = \"ha-cluster-single-zone\",\n ShardCount = 3,\n PscConfigs = new[]\n {\n new Gcp.Redis.Inputs.ClusterPscConfigArgs\n {\n Network = producerNet.Id,\n },\n },\n Region = \"us-central1\",\n ZoneDistributionConfig = new Gcp.Redis.Inputs.ClusterZoneDistributionConfigArgs\n {\n Mode = \"SINGLE_ZONE\",\n Zone = \"us-central1-f\",\n },\n MaintenancePolicy = new Gcp.Redis.Inputs.ClusterMaintenancePolicyArgs\n {\n WeeklyMaintenanceWindows = new[]\n {\n new Gcp.Redis.Inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs\n {\n Day = \"MONDAY\",\n StartTime = new Gcp.Redis.Inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs\n {\n Hours = 1,\n Minutes = 0,\n Seconds = 0,\n Nanos = 0,\n },\n },\n },\n },\n DeletionProtectionEnabled = true,\n }, new CustomResourceOptions\n {\n DependsOn =\n {\n @default,\n },\n });\n\n});\n```\n```go\npackage main\n\nimport 
(\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkconnectivity\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/redis\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tproducerNet, err := compute.NewNetwork(ctx, \"producer_net\", \u0026compute.NetworkArgs{\n\t\t\tName: pulumi.String(\"mynetwork\"),\n\t\t\tAutoCreateSubnetworks: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproducerSubnet, err := compute.NewSubnetwork(ctx, \"producer_subnet\", \u0026compute.SubnetworkArgs{\n\t\t\tName: pulumi.String(\"mysubnet\"),\n\t\t\tIpCidrRange: pulumi.String(\"10.0.0.248/29\"),\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tNetwork: producerNet.ID(),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = networkconnectivity.NewServiceConnectionPolicy(ctx, \"default\", \u0026networkconnectivity.ServiceConnectionPolicyArgs{\n\t\t\tName: pulumi.String(\"mypolicy\"),\n\t\t\tLocation: pulumi.String(\"us-central1\"),\n\t\t\tServiceClass: pulumi.String(\"gcp-memorystore-redis\"),\n\t\t\tDescription: pulumi.String(\"my basic service connection policy\"),\n\t\t\tNetwork: producerNet.ID(),\n\t\t\tPscConfig: \u0026networkconnectivity.ServiceConnectionPolicyPscConfigArgs{\n\t\t\t\tSubnetworks: pulumi.StringArray{\n\t\t\t\t\tproducerSubnet.ID(),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = redis.NewCluster(ctx, \"cluster-ha-single-zone\", \u0026redis.ClusterArgs{\n\t\t\tName: pulumi.String(\"ha-cluster-single-zone\"),\n\t\t\tShardCount: pulumi.Int(3),\n\t\t\tPscConfigs: redis.ClusterPscConfigArray{\n\t\t\t\t\u0026redis.ClusterPscConfigArgs{\n\t\t\t\t\tNetwork: producerNet.ID(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tRegion: pulumi.String(\"us-central1\"),\n\t\t\tZoneDistributionConfig: \u0026redis.ClusterZoneDistributionConfigArgs{\n\t\t\t\tMode: pulumi.String(\"SINGLE_ZONE\"),\n\t\t\t\tZone: pulumi.String(\"us-central1-f\"),\n\t\t\t},\n\t\t\tMaintenancePolicy: \u0026redis.ClusterMaintenancePolicyArgs{\n\t\t\t\tWeeklyMaintenanceWindows: redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowArray{\n\t\t\t\t\t\u0026redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs{\n\t\t\t\t\t\tDay: pulumi.String(\"MONDAY\"),\n\t\t\t\t\t\tStartTime: \u0026redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs{\n\t\t\t\t\t\t\tHours: pulumi.Int(1),\n\t\t\t\t\t\t\tMinutes: pulumi.Int(0),\n\t\t\t\t\t\t\tSeconds: pulumi.Int(0),\n\t\t\t\t\t\t\tNanos: pulumi.Int(0),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDeletionProtectionEnabled: pulumi.Bool(true),\n\t\t}, pulumi.DependsOn([]pulumi.Resource{\n\t\t\t_default,\n\t\t}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.compute.Network;\nimport com.pulumi.gcp.compute.NetworkArgs;\nimport com.pulumi.gcp.compute.Subnetwork;\nimport com.pulumi.gcp.compute.SubnetworkArgs;\nimport com.pulumi.gcp.networkconnectivity.ServiceConnectionPolicy;\nimport com.pulumi.gcp.networkconnectivity.ServiceConnectionPolicyArgs;\nimport com.pulumi.gcp.networkconnectivity.inputs.ServiceConnectionPolicyPscConfigArgs;\nimport com.pulumi.gcp.redis.Cluster;\nimport com.pulumi.gcp.redis.ClusterArgs;\nimport com.pulumi.gcp.redis.inputs.ClusterPscConfigArgs;\nimport 
com.pulumi.gcp.redis.inputs.ClusterZoneDistributionConfigArgs;\nimport com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyArgs;\nimport com.pulumi.resources.CustomResourceOptions;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var producerNet = new Network(\"producerNet\", NetworkArgs.builder()\n .name(\"mynetwork\")\n .autoCreateSubnetworks(false)\n .build());\n\n var producerSubnet = new Subnetwork(\"producerSubnet\", SubnetworkArgs.builder()\n .name(\"mysubnet\")\n .ipCidrRange(\"10.0.0.248/29\")\n .region(\"us-central1\")\n .network(producerNet.id())\n .build());\n\n var default_ = new ServiceConnectionPolicy(\"default\", ServiceConnectionPolicyArgs.builder()\n .name(\"mypolicy\")\n .location(\"us-central1\")\n .serviceClass(\"gcp-memorystore-redis\")\n .description(\"my basic service connection policy\")\n .network(producerNet.id())\n .pscConfig(ServiceConnectionPolicyPscConfigArgs.builder()\n .subnetworks(producerSubnet.id())\n .build())\n .build());\n\n var cluster_ha_single_zone = new Cluster(\"cluster-ha-single-zone\", ClusterArgs.builder()\n .name(\"ha-cluster-single-zone\")\n .shardCount(3)\n .pscConfigs(ClusterPscConfigArgs.builder()\n .network(producerNet.id())\n .build())\n .region(\"us-central1\")\n .zoneDistributionConfig(ClusterZoneDistributionConfigArgs.builder()\n .mode(\"SINGLE_ZONE\")\n .zone(\"us-central1-f\")\n .build())\n .maintenancePolicy(ClusterMaintenancePolicyArgs.builder()\n .weeklyMaintenanceWindows(ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.builder()\n .day(\"MONDAY\")\n .startTime(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.builder()\n .hours(1)\n .minutes(0)\n .seconds(0)\n .nanos(0)\n .build())\n .build())\n .build())\n .deletionProtectionEnabled(true)\n .build(), CustomResourceOptions.builder()\n .dependsOn(default_)\n .build());\n\n }\n}\n```\n```yaml\nresources:\n cluster-ha-single-zone:\n type: gcp:redis:Cluster\n properties:\n name: ha-cluster-single-zone\n shardCount: 3\n pscConfigs:\n - network: ${producerNet.id}\n region: us-central1\n zoneDistributionConfig:\n mode: SINGLE_ZONE\n zone: us-central1-f\n maintenancePolicy:\n weeklyMaintenanceWindows:\n - day: MONDAY\n startTime:\n hours: 1\n minutes: 0\n seconds: 0\n nanos: 0\n deletionProtectionEnabled: true\n options:\n dependson:\n - ${default}\n default:\n type: gcp:networkconnectivity:ServiceConnectionPolicy\n properties:\n name: mypolicy\n location: us-central1\n serviceClass: gcp-memorystore-redis\n description: my basic service connection policy\n network: ${producerNet.id}\n pscConfig:\n subnetworks:\n - ${producerSubnet.id}\n producerSubnet:\n type: gcp:compute:Subnetwork\n name: producer_subnet\n properties:\n name: mysubnet\n ipCidrRange: 10.0.0.248/29\n region: us-central1\n network: ${producerNet.id}\n producerNet:\n type: gcp:compute:Network\n name: producer_net\n properties:\n name: mynetwork\n autoCreateSubnetworks: false\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nCluster can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{region}}/clusters/{{name}}`\n\n* `{{project}}/{{region}}/{{name}}`\n\n* `{{region}}/{{name}}`\n\n* `{{name}}`\n\nWhen using the `pulumi import` command, Cluster can be imported using one of the formats above. 
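This upgrade also adds a `maintenancePolicy` input and a read-only `maintenanceSchedules` output to `gcp:redis/cluster:Cluster`. A minimal TypeScript sketch (editorial addition with placeholder project/network values; in practice the producer network and service connection policy from the examples above are still required) that sets a weekly window and surfaces the planned schedule:

```typescript
import * as gcp from "@pulumi/gcp";

// Sketch only: the PSC network is a placeholder; reuse the producer network
// and ServiceConnectionPolicy shown in the examples above in a real program.
const cluster = new gcp.redis.Cluster("sketch-cluster", {
    shardCount: 3,
    region: "us-central1",
    pscConfigs: [{ network: "projects/my-project/global/networks/mynetwork" }],
    maintenancePolicy: {
        weeklyMaintenanceWindows: [{
            day: "MONDAY",
            startTime: { hours: 1, minutes: 0, seconds: 0, nanos: 0 },
        }],
    },
});

// maintenanceSchedules is output only; it stays empty until the service has
// planned a window, after which it can be read back like any other output.
export const upcomingMaintenance = cluster.maintenanceSchedules;
```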
For example:\n\n```sh\n$ pulumi import gcp:redis/cluster:Cluster default projects/{{project}}/locations/{{region}}/clusters/{{name}}\n```\n\n```sh\n$ pulumi import gcp:redis/cluster:Cluster default {{project}}/{{region}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:redis/cluster:Cluster default {{region}}/{{name}}\n```\n\n```sh\n$ pulumi import gcp:redis/cluster:Cluster default {{name}}\n```\n\n", "properties": { "authorizationMode": { "type": "string", @@ -232457,6 +234060,17 @@ }, "description": "Output only. Endpoints created on each given network,\nfor Redis clients to connect to the cluster.\nCurrently only one endpoint is supported.\nStructure is documented below.\n" }, + "maintenancePolicy": { + "$ref": "#/types/gcp:redis/ClusterMaintenancePolicy:ClusterMaintenancePolicy", + "description": "Maintenance policy for a cluster\n" + }, + "maintenanceSchedules": { + "type": "array", + "items": { + "$ref": "#/types/gcp:redis/ClusterMaintenanceSchedule:ClusterMaintenanceSchedule" + }, + "description": "Upcoming maintenance schedule.\nStructure is documented below.\n" + }, "name": { "type": "string", "description": "Unique name of the resource in this scope including project and location using the form:\nprojects/{projectId}/locations/{locationId}/clusters/{clusterId}\n" @@ -232536,6 +234150,7 @@ "required": [ "createTime", "discoveryEndpoints", + "maintenanceSchedules", "name", "nodeType", "preciseSizeGb", @@ -232559,6 +234174,10 @@ "type": "boolean", "description": "Optional. Indicates if the cluster is deletion protected or not. If the value if set to true, any delete cluster\noperation will fail. Default value is true.\n" }, + "maintenancePolicy": { + "$ref": "#/types/gcp:redis/ClusterMaintenancePolicy:ClusterMaintenancePolicy", + "description": "Maintenance policy for a cluster\n" + }, "name": { "type": "string", "description": "Unique name of the resource in this scope including project and location using the form:\nprojects/{projectId}/locations/{locationId}/clusters/{clusterId}\n", @@ -232638,6 +234257,17 @@ }, "description": "Output only. 
Endpoints created on each given network,\nfor Redis clients to connect to the cluster.\nCurrently only one endpoint is supported.\nStructure is documented below.\n" }, + "maintenancePolicy": { + "$ref": "#/types/gcp:redis/ClusterMaintenancePolicy:ClusterMaintenancePolicy", + "description": "Maintenance policy for a cluster\n" + }, + "maintenanceSchedules": { + "type": "array", + "items": { + "$ref": "#/types/gcp:redis/ClusterMaintenanceSchedule:ClusterMaintenanceSchedule" + }, + "description": "Upcoming maintenance schedule.\nStructure is documented below.\n" + }, "name": { "type": "string", "description": "Unique name of the resource in this scope including project and location using the form:\nprojects/{projectId}/locations/{locationId}/clusters/{clusterId}\n", @@ -237629,6 +239259,150 @@ "type": "object" } }, + "gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport": { + "description": "A Cloud Security Command Center (Cloud SCC) Big Query Export Config.\nIt represents exporting Security Command Center data, including assets, findings, and security marks\nusing gcloud scc bqexports\n\u003e **Note:** In order to use Cloud SCC resources, your organization must be enrolled\nin [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center).\nWithout doing so, you may run into errors during resource creation.\n\n\nTo get more information about FolderSccBigQueryExport, see:\n\n* [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query)\n\n## Example Usage\n\n### Scc V2 Folder Big Query Export Config Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst folder = new gcp.organizations.Folder(\"folder\", {\n parent: \"organizations/123456789\",\n displayName: \"folder-name\",\n deletionProtection: false,\n});\nconst _default = new gcp.bigquery.Dataset(\"default\", {\n datasetId: \"my_dataset_id\",\n friendlyName: \"test\",\n description: \"This is a test description\",\n location: \"US\",\n defaultTableExpirationMs: 3600000,\n defaultPartitionExpirationMs: undefined,\n labels: {\n env: \"default\",\n },\n});\nconst customBigQueryExportConfig = new gcp.securitycenter.V2FolderSccBigQueryExport(\"custom_big_query_export_config\", {\n bigQueryExportId: \"my-export\",\n folder: folder.folderId,\n dataset: _default.id,\n location: \"global\",\n description: \"Cloud Security Command Center Findings Big Query Export Config\",\n filter: \"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nfolder = gcp.organizations.Folder(\"folder\",\n parent=\"organizations/123456789\",\n display_name=\"folder-name\",\n deletion_protection=False)\ndefault = gcp.bigquery.Dataset(\"default\",\n dataset_id=\"my_dataset_id\",\n friendly_name=\"test\",\n description=\"This is a test description\",\n location=\"US\",\n default_table_expiration_ms=3600000,\n default_partition_expiration_ms=None,\n labels={\n \"env\": \"default\",\n })\ncustom_big_query_export_config = gcp.securitycenter.V2FolderSccBigQueryExport(\"custom_big_query_export_config\",\n big_query_export_id=\"my-export\",\n folder=folder.folder_id,\n dataset=default.id,\n location=\"global\",\n description=\"Cloud Security 
Command Center Findings Big Query Export Config\",\n    filter=\"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n    var folder = new Gcp.Organizations.Folder(\"folder\", new()\n    {\n        Parent = \"organizations/123456789\",\n        DisplayName = \"folder-name\",\n        DeletionProtection = false,\n    });\n\n    var @default = new Gcp.BigQuery.Dataset(\"default\", new()\n    {\n        DatasetId = \"my_dataset_id\",\n        FriendlyName = \"test\",\n        Description = \"This is a test description\",\n        Location = \"US\",\n        DefaultTableExpirationMs = 3600000,\n        DefaultPartitionExpirationMs = null,\n        Labels = \n        {\n            { \"env\", \"default\" },\n        },\n    });\n\n    var customBigQueryExportConfig = new Gcp.SecurityCenter.V2FolderSccBigQueryExport(\"custom_big_query_export_config\", new()\n    {\n        BigQueryExportId = \"my-export\",\n        Folder = folder.FolderId,\n        Dataset = @default.Id,\n        Location = \"global\",\n        Description = \"Cloud Security Command Center Findings Big Query Export Config\",\n        Filter = \"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\",\n    });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/securitycenter\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tfolder, err := organizations.NewFolder(ctx, \"folder\", \u0026organizations.FolderArgs{\n\t\t\tParent: pulumi.String(\"organizations/123456789\"),\n\t\t\tDisplayName: pulumi.String(\"folder-name\"),\n\t\t\tDeletionProtection: pulumi.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_default, err := bigquery.NewDataset(ctx, \"default\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_dataset_id\"),\n\t\t\tFriendlyName: pulumi.String(\"test\"),\n\t\t\tDescription: pulumi.String(\"This is a test description\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDefaultTableExpirationMs: pulumi.Int(3600000),\n\t\t\tDefaultPartitionExpirationMs: nil,\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"env\": pulumi.String(\"default\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = securitycenter.NewV2FolderSccBigQueryExport(ctx, \"custom_big_query_export_config\", \u0026securitycenter.V2FolderSccBigQueryExportArgs{\n\t\t\tBigQueryExportId: pulumi.String(\"my-export\"),\n\t\t\tFolder: folder.FolderId,\n\t\t\tDataset: _default.ID(),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tDescription: pulumi.String(\"Cloud Security Command Center Findings Big Query Export Config\"),\n\t\t\tFilter: pulumi.String(\"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.organizations.Folder;\nimport com.pulumi.gcp.organizations.FolderArgs;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.securitycenter.V2FolderSccBigQueryExport;\nimport com.pulumi.gcp.securitycenter.V2FolderSccBigQueryExportArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class 
App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var folder = new Folder(\"folder\", FolderArgs.builder()\n .parent(\"organizations/123456789\")\n .displayName(\"folder-name\")\n .deletionProtection(false)\n .build());\n\n var default_ = new Dataset(\"default\", DatasetArgs.builder()\n .datasetId(\"my_dataset_id\")\n .friendlyName(\"test\")\n .description(\"This is a test description\")\n .location(\"US\")\n .defaultTableExpirationMs(3600000)\n .defaultPartitionExpirationMs(null)\n .labels(Map.of(\"env\", \"default\"))\n .build());\n\n var customBigQueryExportConfig = new V2FolderSccBigQueryExport(\"customBigQueryExportConfig\", V2FolderSccBigQueryExportArgs.builder()\n .bigQueryExportId(\"my-export\")\n .folder(folder.folderId())\n .dataset(default_.id())\n .location(\"global\")\n .description(\"Cloud Security Command Center Findings Big Query Export Config\")\n .filter(\"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n folder:\n type: gcp:organizations:Folder\n properties:\n parent: organizations/123456789\n displayName: folder-name\n deletionProtection: false\n default:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: my_dataset_id\n friendlyName: test\n description: This is a test description\n location: US\n defaultTableExpirationMs: 3.6e+06\n defaultPartitionExpirationMs: null\n labels:\n env: default\n customBigQueryExportConfig:\n type: gcp:securitycenter:V2FolderSccBigQueryExport\n name: custom_big_query_export_config\n properties:\n bigQueryExportId: my-export\n folder: ${folder.folderId}\n dataset: ${default.id}\n location: global\n description: Cloud Security Command Center Findings Big Query Export Config\n filter: state=\"ACTIVE\" AND NOT mute=\"MUTED\"\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nFolderSccBigQueryExport can be imported using any of these accepted formats:\n\n* `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`\n\n* `{{folder}}/{{location}}/{{big_query_export_id}}`\n\nWhen using the `pulumi import` command, FolderSccBigQueryExport can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}\n```\n\n```sh\n$ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default {{folder}}/{{location}}/{{big_query_export_id}}\n```\n\n", + "properties": { + "bigQueryExportId": { + "type": "string", + "description": "This must be unique within the organization. It must consist of only lowercase letters,\nnumbers, and hyphens, must start with a letter, must end with either a letter or a number,\nand must be 63 characters or less.\n\n\n- - -\n" + }, + "createTime": { + "type": "string", + "description": "The time at which the BigQuery export was created. 
This field is set by the server and will be ignored if provided on export on creation.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\n" + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).\n" + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).\n" + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\nRestrictions have the form \u003cfield\u003e \u003coperator\u003e \u003cvalue\u003e and may have\na - character in front of them to indicate negation. The fields\nmap to those defined in the corresponding resource.\nThe supported operators are:\n* = for all value types.\n* \u003e, \u003c, \u003e=, \u003c= for integer values.\n* :, meaning substring matching, for strings.\nThe supported value types are:\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.\n" + }, + "folder": { + "type": "string", + "description": "The folder where Cloud Security Command Center Big Query Export\nConfig lives in.\n" + }, + "location": { + "type": "string", + "description": "The BigQuery export configuration is stored in this location. If not provided, Use global as default.\n" + }, + "mostRecentEditor": { + "type": "string", + "description": "Email address of the user who last edited the BigQuery export.\nThis field is set by the server and will be ignored if provided on export creation or update.\n" + }, + "name": { + "type": "string", + "description": "The resource name of this export, in the format\n`folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`.\nThis field is provided in responses, and is ignored when provided in create requests.\n" + }, + "principal": { + "type": "string", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.\n" + }, + "updateTime": { + "type": "string", + "description": "The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\n" + } + }, + "required": [ + "bigQueryExportId", + "createTime", + "folder", + "mostRecentEditor", + "name", + "principal", + "updateTime" + ], + "inputProperties": { + "bigQueryExportId": { + "type": "string", + "description": "This must be unique within the organization. 
It must consist of only lowercase letters,\nnumbers, and hyphens, must start with a letter, must end with either a letter or a number,\nand must be 63 characters or less.\n\n\n- - -\n", + "willReplaceOnChanges": true + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).\n" + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).\n" + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\nRestrictions have the form \u003cfield\u003e \u003coperator\u003e \u003cvalue\u003e and may have\na - character in front of them to indicate negation. The fields\nmap to those defined in the corresponding resource.\nThe supported operators are:\n* = for all value types.\n* \u003e, \u003c, \u003e=, \u003c= for integer values.\n* :, meaning substring matching, for strings.\nThe supported value types are:\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.\n" + }, + "folder": { + "type": "string", + "description": "The folder where Cloud Security Command Center Big Query Export\nConfig lives in.\n", + "willReplaceOnChanges": true + }, + "location": { + "type": "string", + "description": "The BigQuery export configuration is stored in this location. If not provided, Use global as default.\n", + "willReplaceOnChanges": true + } + }, + "requiredInputs": [ + "bigQueryExportId", + "folder" + ], + "stateInputs": { + "description": "Input properties used for looking up and filtering V2FolderSccBigQueryExport resources.\n", + "properties": { + "bigQueryExportId": { + "type": "string", + "description": "This must be unique within the organization. It must consist of only lowercase letters,\nnumbers, and hyphens, must start with a letter, must end with either a letter or a number,\nand must be 63 characters or less.\n\n\n- - -\n", + "willReplaceOnChanges": true + }, + "createTime": { + "type": "string", + "description": "The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\n" + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).\n" + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).\n" + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. 
The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\nRestrictions have the form \u003cfield\u003e \u003coperator\u003e \u003cvalue\u003e and may have\na - character in front of them to indicate negation. The fields\nmap to those defined in the corresponding resource.\nThe supported operators are:\n* = for all value types.\n* \u003e, \u003c, \u003e=, \u003c= for integer values.\n* :, meaning substring matching, for strings.\nThe supported value types are:\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.\n" + }, + "folder": { + "type": "string", + "description": "The folder where Cloud Security Command Center Big Query Export\nConfig lives in.\n", + "willReplaceOnChanges": true + }, + "location": { + "type": "string", + "description": "The BigQuery export configuration is stored in this location. If not provided, Use global as default.\n", + "willReplaceOnChanges": true + }, + "mostRecentEditor": { + "type": "string", + "description": "Email address of the user who last edited the BigQuery export.\nThis field is set by the server and will be ignored if provided on export creation or update.\n" + }, + "name": { + "type": "string", + "description": "The resource name of this export, in the format\n`folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`.\nThis field is provided in responses, and is ignored when provided in create requests.\n" + }, + "principal": { + "type": "string", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.\n" + }, + "updateTime": { + "type": "string", + "description": "The most recent time at which the BigQuery export was updated. 
This field is set by the server and will be ignored if provided on export creation or update.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\n" + } + }, + "type": "object" + } + }, "gcp:securitycenter/v2OrganizationMuteConfig:V2OrganizationMuteConfig": { "description": "Mute Findings is a volume management feature in Security Command Center\nthat lets you manually or programmatically hide irrelevant findings,\nand create filters to automatically silence existing and future\nfindings based on criteria you specify.\n\n\nTo get more information about OrganizationMuteConfig, see:\n\n* [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.muteConfigs)\n\n## Example Usage\n\n### Scc V2 Organization Mute Config Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.securitycenter.V2OrganizationMuteConfig(\"default\", {\n muteConfigId: \"my-config\",\n organization: \"123456789\",\n location: \"global\",\n description: \"My custom Cloud Security Command Center Finding Organization mute Configuration\",\n filter: \"severity = \\\"HIGH\\\"\",\n type: \"STATIC\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.securitycenter.V2OrganizationMuteConfig(\"default\",\n mute_config_id=\"my-config\",\n organization=\"123456789\",\n location=\"global\",\n description=\"My custom Cloud Security Command Center Finding Organization mute Configuration\",\n filter=\"severity = \\\"HIGH\\\"\",\n type=\"STATIC\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.SecurityCenter.V2OrganizationMuteConfig(\"default\", new()\n {\n MuteConfigId = \"my-config\",\n Organization = \"123456789\",\n Location = \"global\",\n Description = \"My custom Cloud Security Command Center Finding Organization mute Configuration\",\n Filter = \"severity = \\\"HIGH\\\"\",\n Type = \"STATIC\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/securitycenter\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := securitycenter.NewV2OrganizationMuteConfig(ctx, \"default\", \u0026securitycenter.V2OrganizationMuteConfigArgs{\n\t\t\tMuteConfigId: pulumi.String(\"my-config\"),\n\t\t\tOrganization: pulumi.String(\"123456789\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tDescription: pulumi.String(\"My custom Cloud Security Command Center Finding Organization mute Configuration\"),\n\t\t\tFilter: pulumi.String(\"severity = \\\"HIGH\\\"\"),\n\t\t\tType: pulumi.String(\"STATIC\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.securitycenter.V2OrganizationMuteConfig;\nimport com.pulumi.gcp.securitycenter.V2OrganizationMuteConfigArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n 
Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new V2OrganizationMuteConfig(\"default\", V2OrganizationMuteConfigArgs.builder()\n .muteConfigId(\"my-config\")\n .organization(\"123456789\")\n .location(\"global\")\n .description(\"My custom Cloud Security Command Center Finding Organization mute Configuration\")\n .filter(\"severity = \\\"HIGH\\\"\")\n .type(\"STATIC\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:securitycenter:V2OrganizationMuteConfig\n properties:\n muteConfigId: my-config\n organization: '123456789'\n location: global\n description: My custom Cloud Security Command Center Finding Organization mute Configuration\n filter: severity = \"HIGH\"\n type: STATIC\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nOrganizationMuteConfig can be imported using any of these accepted formats:\n\n* `organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}`\n\n* `{{organization}}/{{location}}/{{mute_config_id}}`\n\nWhen using the `pulumi import` command, OrganizationMuteConfig can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:securitycenter/v2OrganizationMuteConfig:V2OrganizationMuteConfig default organizations/{{organization}}/locations/{{location}}/muteConfigs/{{mute_config_id}}\n```\n\n```sh\n$ pulumi import gcp:securitycenter/v2OrganizationMuteConfig:V2OrganizationMuteConfig default {{organization}}/{{location}}/{{mute_config_id}}\n```\n\n", "properties": { @@ -237890,7 +239664,7 @@ } }, "gcp:securitycenter/v2OrganizationSccBigQueryExports:V2OrganizationSccBigQueryExports": { - "description": "A Cloud Security Command Center (Cloud SCC) Big Query Export Config.\nIt represents exporting Security Command Center data, including assets, findings, and security marks\nusing gcloud scc bqexports\n\u003e **Note:** In order to use Cloud SCC resources, your organization must be enrolled\nin [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center).\nWithout doing so, you may run into errors during resource creation.\n\n\nTo get more information about OrganizationSccBigQueryExports, see:\n\n* [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.locations.bigQueryExports)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query)\n\n## Example Usage\n\n### Scc V2 Organization Big Query Export Config Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.bigquery.Dataset(\"default\", {\n datasetId: \"my_dataset_id\",\n friendlyName: \"test\",\n description: \"This is a test description\",\n location: \"US\",\n defaultTableExpirationMs: 3600000,\n defaultPartitionExpirationMs: undefined,\n labels: {\n env: \"default\",\n },\n});\nconst customBigQueryExportConfig = new gcp.securitycenter.V2OrganizationSccBigQueryExports(\"custom_big_query_export_config\", {\n name: \"my-export\",\n bigQueryExportId: \"my-export\",\n organization: \"123456789\",\n dataset: \"my-dataset\",\n location: \"global\",\n description: \"Cloud Security Command Center Findings Big Query Export Config\",\n filter: \"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = 
gcp.bigquery.Dataset(\"default\",\n dataset_id=\"my_dataset_id\",\n friendly_name=\"test\",\n description=\"This is a test description\",\n location=\"US\",\n default_table_expiration_ms=3600000,\n default_partition_expiration_ms=None,\n labels={\n \"env\": \"default\",\n })\ncustom_big_query_export_config = gcp.securitycenter.V2OrganizationSccBigQueryExports(\"custom_big_query_export_config\",\n name=\"my-export\",\n big_query_export_id=\"my-export\",\n organization=\"123456789\",\n dataset=\"my-dataset\",\n location=\"global\",\n description=\"Cloud Security Command Center Findings Big Query Export Config\",\n filter=\"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.BigQuery.Dataset(\"default\", new()\n {\n DatasetId = \"my_dataset_id\",\n FriendlyName = \"test\",\n Description = \"This is a test description\",\n Location = \"US\",\n DefaultTableExpirationMs = 3600000,\n DefaultPartitionExpirationMs = null,\n Labels = \n {\n { \"env\", \"default\" },\n },\n });\n\n var customBigQueryExportConfig = new Gcp.SecurityCenter.V2OrganizationSccBigQueryExports(\"custom_big_query_export_config\", new()\n {\n Name = \"my-export\",\n BigQueryExportId = \"my-export\",\n Organization = \"123456789\",\n Dataset = \"my-dataset\",\n Location = \"global\",\n Description = \"Cloud Security Command Center Findings Big Query Export Config\",\n Filter = \"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/securitycenter\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := bigquery.NewDataset(ctx, \"default\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"my_dataset_id\"),\n\t\t\tFriendlyName: pulumi.String(\"test\"),\n\t\t\tDescription: pulumi.String(\"This is a test description\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDefaultTableExpirationMs: pulumi.Int(3600000),\n\t\t\tDefaultPartitionExpirationMs: nil,\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"env\": pulumi.String(\"default\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = securitycenter.NewV2OrganizationSccBigQueryExports(ctx, \"custom_big_query_export_config\", \u0026securitycenter.V2OrganizationSccBigQueryExportsArgs{\n\t\t\tName: pulumi.String(\"my-export\"),\n\t\t\tBigQueryExportId: pulumi.String(\"my-export\"),\n\t\t\tOrganization: pulumi.String(\"123456789\"),\n\t\t\tDataset: pulumi.String(\"my-dataset\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tDescription: pulumi.String(\"Cloud Security Command Center Findings Big Query Export Config\"),\n\t\t\tFilter: pulumi.String(\"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.securitycenter.V2OrganizationSccBigQueryExports;\nimport com.pulumi.gcp.securitycenter.V2OrganizationSccBigQueryExportsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport 
java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Dataset(\"default\", DatasetArgs.builder()\n .datasetId(\"my_dataset_id\")\n .friendlyName(\"test\")\n .description(\"This is a test description\")\n .location(\"US\")\n .defaultTableExpirationMs(3600000)\n .defaultPartitionExpirationMs(null)\n .labels(Map.of(\"env\", \"default\"))\n .build());\n\n var customBigQueryExportConfig = new V2OrganizationSccBigQueryExports(\"customBigQueryExportConfig\", V2OrganizationSccBigQueryExportsArgs.builder()\n .name(\"my-export\")\n .bigQueryExportId(\"my-export\")\n .organization(\"123456789\")\n .dataset(\"my-dataset\")\n .location(\"global\")\n .description(\"Cloud Security Command Center Findings Big Query Export Config\")\n .filter(\"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: my_dataset_id\n friendlyName: test\n description: This is a test description\n location: US\n defaultTableExpirationMs: 3.6e+06\n defaultPartitionExpirationMs: null\n labels:\n env: default\n customBigQueryExportConfig:\n type: gcp:securitycenter:V2OrganizationSccBigQueryExports\n name: custom_big_query_export_config\n properties:\n name: my-export\n bigQueryExportId: my-export\n organization: '123456789'\n dataset: my-dataset\n location: global\n description: Cloud Security Command Center Findings Big Query Export Config\n filter: state=\"ACTIVE\" AND NOT mute=\"MUTED\"\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nOrganizationSccBigQueryExports can be imported using any of these accepted formats:\n\n* `organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`\n\n* `{{organization}}/{{location}}/{{big_query_export_id}}`\n\nWhen using the `pulumi import` command, OrganizationSccBigQueryExports can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:securitycenter/v2OrganizationSccBigQueryExports:V2OrganizationSccBigQueryExports default organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}\n```\n\n```sh\n$ pulumi import gcp:securitycenter/v2OrganizationSccBigQueryExports:V2OrganizationSccBigQueryExports default {{organization}}/{{location}}/{{big_query_export_id}}\n```\n\n", + "description": "A Cloud Security Command Center (Cloud SCC) Big Query Export Config.\nIt represents exporting Security Command Center data, including assets, findings, and security marks\nusing gcloud scc bqexports\n\u003e **Note:** In order to use Cloud SCC resources, your organization must be enrolled\nin [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center).\nWithout doing so, you may run into errors during resource creation.\n\n\nTo get more information about OrganizationSccBigQueryExports, see:\n\n* [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.locations.bigQueryExports)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query)\n\n## Example Usage\n\n### Scc V2 Organization Big Query Export Config Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst _default = new gcp.bigquery.Dataset(\"default\", {\n datasetId: \"\",\n friendlyName: \"test\",\n description: \"This is a test description\",\n location: \"US\",\n defaultTableExpirationMs: 3600000,\n defaultPartitionExpirationMs: undefined,\n labels: {\n env: \"default\",\n },\n});\nconst customBigQueryExportConfig = new gcp.securitycenter.V2OrganizationSccBigQueryExports(\"custom_big_query_export_config\", {\n name: \"my-export\",\n bigQueryExportId: \"my-export\",\n organization: \"123456789\",\n dataset: _default.id,\n location: \"global\",\n description: \"Cloud Security Command Center Findings Big Query Export Config\",\n filter: \"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.bigquery.Dataset(\"default\",\n dataset_id=\"\",\n friendly_name=\"test\",\n description=\"This is a test description\",\n location=\"US\",\n default_table_expiration_ms=3600000,\n default_partition_expiration_ms=None,\n labels={\n \"env\": \"default\",\n })\ncustom_big_query_export_config = gcp.securitycenter.V2OrganizationSccBigQueryExports(\"custom_big_query_export_config\",\n name=\"my-export\",\n big_query_export_id=\"my-export\",\n organization=\"123456789\",\n dataset=default.id,\n location=\"global\",\n description=\"Cloud Security Command Center Findings Big Query Export Config\",\n filter=\"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = new Gcp.BigQuery.Dataset(\"default\", new()\n {\n DatasetId = \"\",\n FriendlyName = \"test\",\n Description = \"This is a test description\",\n Location = \"US\",\n DefaultTableExpirationMs = 3600000,\n DefaultPartitionExpirationMs = null,\n Labels = \n {\n { \"env\", \"default\" },\n },\n });\n\n var customBigQueryExportConfig = new Gcp.SecurityCenter.V2OrganizationSccBigQueryExports(\"custom_big_query_export_config\", new()\n {\n Name = 
\"my-export\",\n BigQueryExportId = \"my-export\",\n Organization = \"123456789\",\n Dataset = @default.Id,\n Location = \"global\",\n Description = \"Cloud Security Command Center Findings Big Query Export Config\",\n Filter = \"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery\"\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/securitycenter\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := bigquery.NewDataset(ctx, \"default\", \u0026bigquery.DatasetArgs{\n\t\t\tDatasetId: pulumi.String(\"\"),\n\t\t\tFriendlyName: pulumi.String(\"test\"),\n\t\t\tDescription: pulumi.String(\"This is a test description\"),\n\t\t\tLocation: pulumi.String(\"US\"),\n\t\t\tDefaultTableExpirationMs: pulumi.Int(3600000),\n\t\t\tDefaultPartitionExpirationMs: nil,\n\t\t\tLabels: pulumi.StringMap{\n\t\t\t\t\"env\": pulumi.String(\"default\"),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = securitycenter.NewV2OrganizationSccBigQueryExports(ctx, \"custom_big_query_export_config\", \u0026securitycenter.V2OrganizationSccBigQueryExportsArgs{\n\t\t\tName: pulumi.String(\"my-export\"),\n\t\t\tBigQueryExportId: pulumi.String(\"my-export\"),\n\t\t\tOrganization: pulumi.String(\"123456789\"),\n\t\t\tDataset: _default.ID(),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tDescription: pulumi.String(\"Cloud Security Command Center Findings Big Query Export Config\"),\n\t\t\tFilter: pulumi.String(\"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.bigquery.Dataset;\nimport com.pulumi.gcp.bigquery.DatasetArgs;\nimport com.pulumi.gcp.securitycenter.V2OrganizationSccBigQueryExports;\nimport com.pulumi.gcp.securitycenter.V2OrganizationSccBigQueryExportsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var default_ = new Dataset(\"default\", DatasetArgs.builder()\n .datasetId(\"\")\n .friendlyName(\"test\")\n .description(\"This is a test description\")\n .location(\"US\")\n .defaultTableExpirationMs(3600000)\n .defaultPartitionExpirationMs(null)\n .labels(Map.of(\"env\", \"default\"))\n .build());\n\n var customBigQueryExportConfig = new V2OrganizationSccBigQueryExports(\"customBigQueryExportConfig\", V2OrganizationSccBigQueryExportsArgs.builder()\n .name(\"my-export\")\n .bigQueryExportId(\"my-export\")\n .organization(\"123456789\")\n .dataset(default_.id())\n .location(\"global\")\n .description(\"Cloud Security Command Center Findings Big Query Export Config\")\n .filter(\"state=\\\"ACTIVE\\\" AND NOT mute=\\\"MUTED\\\"\")\n .build());\n\n }\n}\n```\n```yaml\nresources:\n default:\n type: gcp:bigquery:Dataset\n properties:\n datasetId:\n friendlyName: test\n description: This is a test description\n location: US\n defaultTableExpirationMs: 3.6e+06\n defaultPartitionExpirationMs: null\n labels:\n env: default\n customBigQueryExportConfig:\n type: gcp:securitycenter:V2OrganizationSccBigQueryExports\n name: 
custom_big_query_export_config\n properties:\n name: my-export\n bigQueryExportId: my-export\n organization: '123456789'\n dataset: ${default.id}\n location: global\n description: Cloud Security Command Center Findings Big Query Export Config\n filter: state=\"ACTIVE\" AND NOT mute=\"MUTED\"\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nOrganizationSccBigQueryExports can be imported using any of these accepted formats:\n\n* `organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`\n\n* `{{organization}}/{{location}}/{{big_query_export_id}}`\n\nWhen using the `pulumi import` command, OrganizationSccBigQueryExports can be imported using one of the formats above. For example:\n\n```sh\n$ pulumi import gcp:securitycenter/v2OrganizationSccBigQueryExports:V2OrganizationSccBigQueryExports default organizations/{{organization}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}\n```\n\n```sh\n$ pulumi import gcp:securitycenter/v2OrganizationSccBigQueryExports:V2OrganizationSccBigQueryExports default {{organization}}/{{location}}/{{big_query_export_id}}\n```\n\n", "properties": { "bigQueryExportId": { "type": "string", @@ -238630,6 +240404,149 @@ "type": "object" } }, + "gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport": { + "description": "A Cloud Security Command Center (Cloud SCC) Big Query Export Config.\nIt represents exporting Security Command Center data, including assets, findings, and security marks\nusing gcloud scc bqexports\n\u003e **Note:** In order to use Cloud SCC resources, your organization must be enrolled\nin [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center).\nWithout doing so, you may run into errors during resource creation.\n\n\nTo get more information about ProjectSccBigQueryExport, see:\n\n* [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.bigQueryExports)\n* How-to Guides\n * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query)\n\n## Example Usage\n\n### Scc V2 Project Big Query Export Config Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```yaml\nresources:\n default:\n type: gcp:bigquery:Dataset\n properties:\n datasetId: my_dataset_id\n friendlyName: test\n description: This is a test description\n location: US\n defaultTableExpirationMs: 3.6e+06\n defaultPartitionExpirationMs: null\n labels:\n env: default\n customBigQueryExportConfig:\n type: gcp:securitycenter:V2ProjectSccBigQueryExport\n name: custom_big_query_export_config\n properties:\n name: my-export\n bigQueryExportId: my-export\n project: my-project-name\n dataset: ${default.id}\n location: global\n description: Cloud Security Command Center Findings Big Query Export Config\n filter: state=\"ACTIVE\" AND NOT mute=\"MUTED\"\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nProjectSccBigQueryExport can be imported using any of these accepted formats:\n\n* `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`\n\n* `{{project}}/{{location}}/{{big_query_export_id}}`\n\n* `{{location}}/{{big_query_export_id}}`\n\nWhen using the `pulumi import` command, ProjectSccBigQueryExport can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}\n```\n\n```sh\n$ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{project}}/{{location}}/{{big_query_export_id}}\n```\n\n```sh\n$ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{location}}/{{big_query_export_id}}\n```\n\n", + "properties": { + "bigQueryExportId": { + "type": "string", + "description": "This must be unique within the organization.\n\n\n- - -\n" + }, + "createTime": { + "type": "string", + "description": "The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\n" + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).\n" + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).\n" + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\nRestrictions have the form \u003cfield\u003e \u003coperator\u003e \u003cvalue\u003e and may have\na - character in front of them to indicate negation. The fields\nmap to those defined in the corresponding resource.\nThe supported operators are:\n* = for all value types.\n* \u003e, \u003c, \u003e=, \u003c= for integer values.\n* :, meaning substring matching, for strings.\nThe supported value types are:\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.\n" + }, + "location": { + "type": "string", + "description": "location Id is provided by organization. 
If not provided, Use global as default.\n" + }, + "mostRecentEditor": { + "type": "string", + "description": "Email address of the user who last edited the BigQuery export.\nThis field is set by the server and will be ignored if provided on export creation or update.\n" + }, + "name": { + "type": "string", + "description": "The resource name of this export, in the format\n`projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`.\nThis field is provided in responses, and is ignored when provided in create requests.\n" + }, + "principal": { + "type": "string", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.\n" + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs.\nIf it is not provided, the provider project is used.\n" + }, + "updateTime": { + "type": "string", + "description": "The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\n" + } + }, + "required": [ + "bigQueryExportId", + "createTime", + "mostRecentEditor", + "name", + "principal", + "project", + "updateTime" + ], + "inputProperties": { + "bigQueryExportId": { + "type": "string", + "description": "This must be unique within the organization.\n\n\n- - -\n", + "willReplaceOnChanges": true + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).\n" + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).\n" + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\nRestrictions have the form \u003cfield\u003e \u003coperator\u003e \u003cvalue\u003e and may have\na - character in front of them to indicate negation. The fields\nmap to those defined in the corresponding resource.\nThe supported operators are:\n* = for all value types.\n* \u003e, \u003c, \u003e=, \u003c= for integer values.\n* :, meaning substring matching, for strings.\nThe supported value types are:\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.\n" + }, + "location": { + "type": "string", + "description": "location Id is provided by organization. 
If not provided, Use global as default.\n", + "willReplaceOnChanges": true + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs.\nIf it is not provided, the provider project is used.\n", + "willReplaceOnChanges": true + } + }, + "requiredInputs": [ + "bigQueryExportId" + ], + "stateInputs": { + "description": "Input properties used for looking up and filtering V2ProjectSccBigQueryExport resources.\n", + "properties": { + "bigQueryExportId": { + "type": "string", + "description": "This must be unique within the organization.\n\n\n- - -\n", + "willReplaceOnChanges": true + }, + "createTime": { + "type": "string", + "description": "The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\n" + }, + "dataset": { + "type": "string", + "description": "The dataset to write findings' updates to.\nIts format is \"projects/[projectId]/datasets/[bigquery_dataset_id]\".\nBigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).\n" + }, + "description": { + "type": "string", + "description": "The description of the notification config (max of 1024 characters).\n" + }, + "filter": { + "type": "string", + "description": "Expression that defines the filter to apply across create/update\nevents of findings. The\nexpression is a list of zero or more restrictions combined via\nlogical operators AND and OR. Parentheses are supported, and OR\nhas higher precedence than AND.\nRestrictions have the form \u003cfield\u003e \u003coperator\u003e \u003cvalue\u003e and may have\na - character in front of them to indicate negation. The fields\nmap to those defined in the corresponding resource.\nThe supported operators are:\n* = for all value types.\n* \u003e, \u003c, \u003e=, \u003c= for integer values.\n* :, meaning substring matching, for strings.\nThe supported value types are:\n* string literals in quotes.\n* integer literals without quotes.\n* boolean literals true and false without quotes.\nSee\n[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)\nfor information on how to write a filter.\n" + }, + "location": { + "type": "string", + "description": "location Id is provided by organization. 
If not provided, Use global as default.\n", + "willReplaceOnChanges": true + }, + "mostRecentEditor": { + "type": "string", + "description": "Email address of the user who last edited the BigQuery export.\nThis field is set by the server and will be ignored if provided on export creation or update.\n" + }, + "name": { + "type": "string", + "description": "The resource name of this export, in the format\n`projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`.\nThis field is provided in responses, and is ignored when provided in create requests.\n" + }, + "principal": { + "type": "string", + "description": "The service account that needs permission to create table and upload data to the BigQuery dataset.\n" + }, + "project": { + "type": "string", + "description": "The ID of the project in which the resource belongs.\nIf it is not provided, the provider project is used.\n", + "willReplaceOnChanges": true + }, + "updateTime": { + "type": "string", + "description": "The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update.\nA timestamp in RFC3339 UTC \"Zulu\" format, with nanosecond resolution and up to nine fractional digits.\nExamples: \"2014-10-02T15:01:23Z\" and \"2014-10-02T15:01:23.045123456Z\".\n" + } + }, + "type": "object" + } + }, "gcp:securityposture/posture:Posture": { "description": "A Posture represents a collection of policy set including its name, state, description\nand policy sets. A policy set includes set of policies along with their definition.\nA posture can be created at the organization level.\nEvery update to a deployed posture creates a new posture revision with an updated revision_id.\n\n\nTo get more information about Posture, see:\n\n* How-to Guides\n * [Create and deploy a posture](https://cloud.google.com/security-command-center/docs/how-to-use-security-posture)\n\n## Example Usage\n\n### Securityposture Posture Basic\n\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst posture1 = new gcp.securityposture.Posture(\"posture1\", {\n postureId: \"posture_example\",\n parent: \"organizations/123456789\",\n location: \"global\",\n state: \"ACTIVE\",\n description: \"a new posture\",\n policySets: [\n {\n policySetId: \"org_policy_set\",\n description: \"set of org policies\",\n policies: [\n {\n policyId: \"canned_org_policy\",\n constraint: {\n orgPolicyConstraint: {\n cannedConstraintId: \"storage.uniformBucketLevelAccess\",\n policyRules: [{\n enforce: true,\n condition: {\n description: \"condition description\",\n expression: \"resource.matchTag('org_id/tag_key_short_name,'tag_value_short_name')\",\n title: \"a CEL condition\",\n },\n }],\n },\n },\n },\n {\n policyId: \"custom_org_policy\",\n constraint: {\n orgPolicyConstraintCustom: {\n customConstraint: {\n name: \"organizations/123456789/customConstraints/custom.disableGkeAutoUpgrade\",\n displayName: \"Disable GKE auto upgrade\",\n description: \"Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced.\",\n actionType: \"ALLOW\",\n condition: \"resource.management.autoUpgrade == false\",\n methodTypes: [\n \"CREATE\",\n \"UPDATE\",\n ],\n resourceTypes: [\"container.googleapis.com/NodePool\"],\n },\n policyRules: [{\n enforce: true,\n condition: {\n description: \"condition description\",\n expression: 
\"resource.matchTagId('tagKeys/key_id','tagValues/value_id')\",\n title: \"a CEL condition\",\n },\n }],\n },\n },\n },\n ],\n },\n {\n policySetId: \"sha_policy_set\",\n description: \"set of sha policies\",\n policies: [\n {\n policyId: \"sha_builtin_module\",\n constraint: {\n securityHealthAnalyticsModule: {\n moduleName: \"BIGQUERY_TABLE_CMEK_DISABLED\",\n moduleEnablementState: \"ENABLED\",\n },\n },\n description: \"enable BIGQUERY_TABLE_CMEK_DISABLED\",\n },\n {\n policyId: \"sha_custom_module\",\n constraint: {\n securityHealthAnalyticsCustomModule: {\n displayName: \"custom_SHA_policy\",\n config: {\n predicate: {\n expression: \"resource.rotationPeriod \u003e duration('2592000s')\",\n },\n customOutput: {\n properties: [{\n name: \"duration\",\n valueExpression: {\n expression: \"resource.rotationPeriod\",\n },\n }],\n },\n resourceSelector: {\n resourceTypes: [\"cloudkms.googleapis.com/CryptoKey\"],\n },\n severity: \"LOW\",\n description: \"Custom Module\",\n recommendation: \"Testing custom modules\",\n },\n moduleEnablementState: \"ENABLED\",\n },\n },\n },\n ],\n },\n ],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nposture1 = gcp.securityposture.Posture(\"posture1\",\n posture_id=\"posture_example\",\n parent=\"organizations/123456789\",\n location=\"global\",\n state=\"ACTIVE\",\n description=\"a new posture\",\n policy_sets=[\n {\n \"policy_set_id\": \"org_policy_set\",\n \"description\": \"set of org policies\",\n \"policies\": [\n {\n \"policy_id\": \"canned_org_policy\",\n \"constraint\": {\n \"org_policy_constraint\": {\n \"canned_constraint_id\": \"storage.uniformBucketLevelAccess\",\n \"policy_rules\": [{\n \"enforce\": True,\n \"condition\": {\n \"description\": \"condition description\",\n \"expression\": \"resource.matchTag('org_id/tag_key_short_name,'tag_value_short_name')\",\n \"title\": \"a CEL condition\",\n },\n }],\n },\n },\n },\n {\n \"policy_id\": \"custom_org_policy\",\n \"constraint\": {\n \"org_policy_constraint_custom\": {\n \"custom_constraint\": {\n \"name\": \"organizations/123456789/customConstraints/custom.disableGkeAutoUpgrade\",\n \"display_name\": \"Disable GKE auto upgrade\",\n \"description\": \"Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced.\",\n \"action_type\": \"ALLOW\",\n \"condition\": \"resource.management.autoUpgrade == false\",\n \"method_types\": [\n \"CREATE\",\n \"UPDATE\",\n ],\n \"resource_types\": [\"container.googleapis.com/NodePool\"],\n },\n \"policy_rules\": [{\n \"enforce\": True,\n \"condition\": {\n \"description\": \"condition description\",\n \"expression\": \"resource.matchTagId('tagKeys/key_id','tagValues/value_id')\",\n \"title\": \"a CEL condition\",\n },\n }],\n },\n },\n },\n ],\n },\n {\n \"policy_set_id\": \"sha_policy_set\",\n \"description\": \"set of sha policies\",\n \"policies\": [\n {\n \"policy_id\": \"sha_builtin_module\",\n \"constraint\": {\n \"security_health_analytics_module\": {\n \"module_name\": \"BIGQUERY_TABLE_CMEK_DISABLED\",\n \"module_enablement_state\": \"ENABLED\",\n },\n },\n \"description\": \"enable BIGQUERY_TABLE_CMEK_DISABLED\",\n },\n {\n \"policy_id\": \"sha_custom_module\",\n \"constraint\": {\n \"security_health_analytics_custom_module\": {\n \"display_name\": \"custom_SHA_policy\",\n \"config\": {\n \"predicate\": {\n \"expression\": \"resource.rotationPeriod \u003e duration('2592000s')\",\n },\n \"custom_output\": {\n \"properties\": [{\n \"name\": \"duration\",\n 
\"value_expression\": {\n \"expression\": \"resource.rotationPeriod\",\n },\n }],\n },\n \"resource_selector\": {\n \"resource_types\": [\"cloudkms.googleapis.com/CryptoKey\"],\n },\n \"severity\": \"LOW\",\n \"description\": \"Custom Module\",\n \"recommendation\": \"Testing custom modules\",\n },\n \"module_enablement_state\": \"ENABLED\",\n },\n },\n },\n ],\n },\n ])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var posture1 = new Gcp.SecurityPosture.Posture(\"posture1\", new()\n {\n PostureId = \"posture_example\",\n Parent = \"organizations/123456789\",\n Location = \"global\",\n State = \"ACTIVE\",\n Description = \"a new posture\",\n PolicySets = new[]\n {\n new Gcp.SecurityPosture.Inputs.PosturePolicySetArgs\n {\n PolicySetId = \"org_policy_set\",\n Description = \"set of org policies\",\n Policies = new[]\n {\n new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyArgs\n {\n PolicyId = \"canned_org_policy\",\n Constraint = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintArgs\n {\n OrgPolicyConstraint = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintOrgPolicyConstraintArgs\n {\n CannedConstraintId = \"storage.uniformBucketLevelAccess\",\n PolicyRules = new[]\n {\n new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleArgs\n {\n Enforce = true,\n Condition = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleConditionArgs\n {\n Description = \"condition description\",\n Expression = \"resource.matchTag('org_id/tag_key_short_name,'tag_value_short_name')\",\n Title = \"a CEL condition\",\n },\n },\n },\n },\n },\n },\n new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyArgs\n {\n PolicyId = \"custom_org_policy\",\n Constraint = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintArgs\n {\n OrgPolicyConstraintCustom = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomArgs\n {\n CustomConstraint = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomCustomConstraintArgs\n {\n Name = \"organizations/123456789/customConstraints/custom.disableGkeAutoUpgrade\",\n DisplayName = \"Disable GKE auto upgrade\",\n Description = \"Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced.\",\n ActionType = \"ALLOW\",\n Condition = \"resource.management.autoUpgrade == false\",\n MethodTypes = new[]\n {\n \"CREATE\",\n \"UPDATE\",\n },\n ResourceTypes = new[]\n {\n \"container.googleapis.com/NodePool\",\n },\n },\n PolicyRules = new[]\n {\n new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleArgs\n {\n Enforce = true,\n Condition = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleConditionArgs\n {\n Description = \"condition description\",\n Expression = \"resource.matchTagId('tagKeys/key_id','tagValues/value_id')\",\n Title = \"a CEL condition\",\n },\n },\n },\n },\n },\n },\n },\n },\n new Gcp.SecurityPosture.Inputs.PosturePolicySetArgs\n {\n PolicySetId = \"sha_policy_set\",\n Description = \"set of sha policies\",\n Policies = new[]\n {\n new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyArgs\n {\n PolicyId = \"sha_builtin_module\",\n Constraint = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintArgs\n {\n 
SecurityHealthAnalyticsModule = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsModuleArgs\n {\n ModuleName = \"BIGQUERY_TABLE_CMEK_DISABLED\",\n ModuleEnablementState = \"ENABLED\",\n },\n },\n Description = \"enable BIGQUERY_TABLE_CMEK_DISABLED\",\n },\n new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyArgs\n {\n PolicyId = \"sha_custom_module\",\n Constraint = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintArgs\n {\n SecurityHealthAnalyticsCustomModule = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleArgs\n {\n DisplayName = \"custom_SHA_policy\",\n Config = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigArgs\n {\n Predicate = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigPredicateArgs\n {\n Expression = \"resource.rotationPeriod \u003e duration('2592000s')\",\n },\n CustomOutput = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputArgs\n {\n Properties = new[]\n {\n new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputPropertyArgs\n {\n Name = \"duration\",\n ValueExpression = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputPropertyValueExpressionArgs\n {\n Expression = \"resource.rotationPeriod\",\n },\n },\n },\n },\n ResourceSelector = new Gcp.SecurityPosture.Inputs.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigResourceSelectorArgs\n {\n ResourceTypes = new[]\n {\n \"cloudkms.googleapis.com/CryptoKey\",\n },\n },\n Severity = \"LOW\",\n Description = \"Custom Module\",\n Recommendation = \"Testing custom modules\",\n },\n ModuleEnablementState = \"ENABLED\",\n },\n },\n },\n },\n },\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/securityposture\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := securityposture.NewPosture(ctx, \"posture1\", \u0026securityposture.PostureArgs{\n\t\t\tPostureId: pulumi.String(\"posture_example\"),\n\t\t\tParent: pulumi.String(\"organizations/123456789\"),\n\t\t\tLocation: pulumi.String(\"global\"),\n\t\t\tState: pulumi.String(\"ACTIVE\"),\n\t\t\tDescription: pulumi.String(\"a new posture\"),\n\t\t\tPolicySets: securityposture.PosturePolicySetArray{\n\t\t\t\t\u0026securityposture.PosturePolicySetArgs{\n\t\t\t\t\tPolicySetId: pulumi.String(\"org_policy_set\"),\n\t\t\t\t\tDescription: pulumi.String(\"set of org policies\"),\n\t\t\t\t\tPolicies: securityposture.PosturePolicySetPolicyArray{\n\t\t\t\t\t\t\u0026securityposture.PosturePolicySetPolicyArgs{\n\t\t\t\t\t\t\tPolicyId: pulumi.String(\"canned_org_policy\"),\n\t\t\t\t\t\t\tConstraint: \u0026securityposture.PosturePolicySetPolicyConstraintArgs{\n\t\t\t\t\t\t\t\tOrgPolicyConstraint: \u0026securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintArgs{\n\t\t\t\t\t\t\t\t\tCannedConstraintId: pulumi.String(\"storage.uniformBucketLevelAccess\"),\n\t\t\t\t\t\t\t\t\tPolicyRules: securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleArray{\n\t\t\t\t\t\t\t\t\t\t\u0026securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleArgs{\n\t\t\t\t\t\t\t\t\t\t\tEnforce: 
pulumi.Bool(true),\n\t\t\t\t\t\t\t\t\t\t\tCondition: \u0026securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleConditionArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tDescription: pulumi.String(\"condition description\"),\n\t\t\t\t\t\t\t\t\t\t\t\tExpression: pulumi.String(\"resource.matchTag('org_id/tag_key_short_name,'tag_value_short_name')\"),\n\t\t\t\t\t\t\t\t\t\t\t\tTitle: pulumi.String(\"a CEL condition\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026securityposture.PosturePolicySetPolicyArgs{\n\t\t\t\t\t\t\tPolicyId: pulumi.String(\"custom_org_policy\"),\n\t\t\t\t\t\t\tConstraint: \u0026securityposture.PosturePolicySetPolicyConstraintArgs{\n\t\t\t\t\t\t\t\tOrgPolicyConstraintCustom: \u0026securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomArgs{\n\t\t\t\t\t\t\t\t\tCustomConstraint: \u0026securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomCustomConstraintArgs{\n\t\t\t\t\t\t\t\t\t\tName: pulumi.String(\"organizations/123456789/customConstraints/custom.disableGkeAutoUpgrade\"),\n\t\t\t\t\t\t\t\t\t\tDisplayName: pulumi.String(\"Disable GKE auto upgrade\"),\n\t\t\t\t\t\t\t\t\t\tDescription: pulumi.String(\"Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced.\"),\n\t\t\t\t\t\t\t\t\t\tActionType: pulumi.String(\"ALLOW\"),\n\t\t\t\t\t\t\t\t\t\tCondition: pulumi.String(\"resource.management.autoUpgrade == false\"),\n\t\t\t\t\t\t\t\t\t\tMethodTypes: pulumi.StringArray{\n\t\t\t\t\t\t\t\t\t\t\tpulumi.String(\"CREATE\"),\n\t\t\t\t\t\t\t\t\t\t\tpulumi.String(\"UPDATE\"),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tResourceTypes: pulumi.StringArray{\n\t\t\t\t\t\t\t\t\t\t\tpulumi.String(\"container.googleapis.com/NodePool\"),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tPolicyRules: securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleArray{\n\t\t\t\t\t\t\t\t\t\t\u0026securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleArgs{\n\t\t\t\t\t\t\t\t\t\t\tEnforce: pulumi.Bool(true),\n\t\t\t\t\t\t\t\t\t\t\tCondition: \u0026securityposture.PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleConditionArgs{\n\t\t\t\t\t\t\t\t\t\t\t\tDescription: pulumi.String(\"condition description\"),\n\t\t\t\t\t\t\t\t\t\t\t\tExpression: pulumi.String(\"resource.matchTagId('tagKeys/key_id','tagValues/value_id')\"),\n\t\t\t\t\t\t\t\t\t\t\t\tTitle: pulumi.String(\"a CEL condition\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026securityposture.PosturePolicySetArgs{\n\t\t\t\t\tPolicySetId: pulumi.String(\"sha_policy_set\"),\n\t\t\t\t\tDescription: pulumi.String(\"set of sha policies\"),\n\t\t\t\t\tPolicies: securityposture.PosturePolicySetPolicyArray{\n\t\t\t\t\t\t\u0026securityposture.PosturePolicySetPolicyArgs{\n\t\t\t\t\t\t\tPolicyId: pulumi.String(\"sha_builtin_module\"),\n\t\t\t\t\t\t\tConstraint: \u0026securityposture.PosturePolicySetPolicyConstraintArgs{\n\t\t\t\t\t\t\t\tSecurityHealthAnalyticsModule: \u0026securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsModuleArgs{\n\t\t\t\t\t\t\t\t\tModuleName: pulumi.String(\"BIGQUERY_TABLE_CMEK_DISABLED\"),\n\t\t\t\t\t\t\t\t\tModuleEnablementState: 
pulumi.String(\"ENABLED\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tDescription: pulumi.String(\"enable BIGQUERY_TABLE_CMEK_DISABLED\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\u0026securityposture.PosturePolicySetPolicyArgs{\n\t\t\t\t\t\t\tPolicyId: pulumi.String(\"sha_custom_module\"),\n\t\t\t\t\t\t\tConstraint: \u0026securityposture.PosturePolicySetPolicyConstraintArgs{\n\t\t\t\t\t\t\t\tSecurityHealthAnalyticsCustomModule: \u0026securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleArgs{\n\t\t\t\t\t\t\t\t\tDisplayName: pulumi.String(\"custom_SHA_policy\"),\n\t\t\t\t\t\t\t\t\tConfig: \u0026securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigArgs{\n\t\t\t\t\t\t\t\t\t\tPredicate: \u0026securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigPredicateArgs{\n\t\t\t\t\t\t\t\t\t\t\tExpression: pulumi.String(\"resource.rotationPeriod \u003e duration('2592000s')\"),\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tCustomOutput: securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputArgs{\n\t\t\t\t\t\t\t\t\t\t\tProperties: securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputPropertyArray{\n\t\t\t\t\t\t\t\t\t\t\t\t\u0026securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputPropertyArgs{\n\t\t\t\t\t\t\t\t\t\t\t\t\tName: pulumi.String(\"duration\"),\n\t\t\t\t\t\t\t\t\t\t\t\t\tValueExpression: \u0026securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputPropertyValueExpressionArgs{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tExpression: pulumi.String(\"resource.rotationPeriod\"),\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tResourceSelector: \u0026securityposture.PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigResourceSelectorArgs{\n\t\t\t\t\t\t\t\t\t\t\tResourceTypes: pulumi.StringArray{\n\t\t\t\t\t\t\t\t\t\t\t\tpulumi.String(\"cloudkms.googleapis.com/CryptoKey\"),\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tSeverity: pulumi.String(\"LOW\"),\n\t\t\t\t\t\t\t\t\t\tDescription: pulumi.String(\"Custom Module\"),\n\t\t\t\t\t\t\t\t\t\tRecommendation: pulumi.String(\"Testing custom modules\"),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tModuleEnablementState: pulumi.String(\"ENABLED\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.securityposture.Posture;\nimport com.pulumi.gcp.securityposture.PostureArgs;\nimport com.pulumi.gcp.securityposture.inputs.PosturePolicySetArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n var posture1 = new Posture(\"posture1\", PostureArgs.builder()\n .postureId(\"posture_example\")\n .parent(\"organizations/123456789\")\n .location(\"global\")\n .state(\"ACTIVE\")\n .description(\"a new posture\")\n .policySets( \n PosturePolicySetArgs.builder()\n 
.policySetId(\"org_policy_set\")\n .description(\"set of org policies\")\n .policies( \n PosturePolicySetPolicyArgs.builder()\n .policyId(\"canned_org_policy\")\n .constraint(PosturePolicySetPolicyConstraintArgs.builder()\n .orgPolicyConstraint(PosturePolicySetPolicyConstraintOrgPolicyConstraintArgs.builder()\n .cannedConstraintId(\"storage.uniformBucketLevelAccess\")\n .policyRules(PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleArgs.builder()\n .enforce(true)\n .condition(PosturePolicySetPolicyConstraintOrgPolicyConstraintPolicyRuleConditionArgs.builder()\n .description(\"condition description\")\n .expression(\"resource.matchTag('org_id/tag_key_short_name,'tag_value_short_name')\")\n .title(\"a CEL condition\")\n .build())\n .build())\n .build())\n .build())\n .build(),\n PosturePolicySetPolicyArgs.builder()\n .policyId(\"custom_org_policy\")\n .constraint(PosturePolicySetPolicyConstraintArgs.builder()\n .orgPolicyConstraintCustom(PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomArgs.builder()\n .customConstraint(PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomCustomConstraintArgs.builder()\n .name(\"organizations/123456789/customConstraints/custom.disableGkeAutoUpgrade\")\n .displayName(\"Disable GKE auto upgrade\")\n .description(\"Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced.\")\n .actionType(\"ALLOW\")\n .condition(\"resource.management.autoUpgrade == false\")\n .methodTypes( \n \"CREATE\",\n \"UPDATE\")\n .resourceTypes(\"container.googleapis.com/NodePool\")\n .build())\n .policyRules(PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleArgs.builder()\n .enforce(true)\n .condition(PosturePolicySetPolicyConstraintOrgPolicyConstraintCustomPolicyRuleConditionArgs.builder()\n .description(\"condition description\")\n .expression(\"resource.matchTagId('tagKeys/key_id','tagValues/value_id')\")\n .title(\"a CEL condition\")\n .build())\n .build())\n .build())\n .build())\n .build())\n .build(),\n PosturePolicySetArgs.builder()\n .policySetId(\"sha_policy_set\")\n .description(\"set of sha policies\")\n .policies( \n PosturePolicySetPolicyArgs.builder()\n .policyId(\"sha_builtin_module\")\n .constraint(PosturePolicySetPolicyConstraintArgs.builder()\n .securityHealthAnalyticsModule(PosturePolicySetPolicyConstraintSecurityHealthAnalyticsModuleArgs.builder()\n .moduleName(\"BIGQUERY_TABLE_CMEK_DISABLED\")\n .moduleEnablementState(\"ENABLED\")\n .build())\n .build())\n .description(\"enable BIGQUERY_TABLE_CMEK_DISABLED\")\n .build(),\n PosturePolicySetPolicyArgs.builder()\n .policyId(\"sha_custom_module\")\n .constraint(PosturePolicySetPolicyConstraintArgs.builder()\n .securityHealthAnalyticsCustomModule(PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleArgs.builder()\n .displayName(\"custom_SHA_policy\")\n .config(PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigArgs.builder()\n .predicate(PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigPredicateArgs.builder()\n .expression(\"resource.rotationPeriod \u003e duration('2592000s')\")\n .build())\n .customOutput(PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputArgs.builder()\n .properties(PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputPropertyArgs.builder()\n .name(\"duration\")\n 
.valueExpression(PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigCustomOutputPropertyValueExpressionArgs.builder()\n .expression(\"resource.rotationPeriod\")\n .build())\n .build())\n .build())\n .resourceSelector(PosturePolicySetPolicyConstraintSecurityHealthAnalyticsCustomModuleConfigResourceSelectorArgs.builder()\n .resourceTypes(\"cloudkms.googleapis.com/CryptoKey\")\n .build())\n .severity(\"LOW\")\n .description(\"Custom Module\")\n .recommendation(\"Testing custom modules\")\n .build())\n .moduleEnablementState(\"ENABLED\")\n .build())\n .build())\n .build())\n .build())\n .build());\n\n }\n}\n```\n```yaml\nresources:\n posture1:\n type: gcp:securityposture:Posture\n properties:\n postureId: posture_example\n parent: organizations/123456789\n location: global\n state: ACTIVE\n description: a new posture\n policySets:\n - policySetId: org_policy_set\n description: set of org policies\n policies:\n - policyId: canned_org_policy\n constraint:\n orgPolicyConstraint:\n cannedConstraintId: storage.uniformBucketLevelAccess\n policyRules:\n - enforce: true\n condition:\n description: condition description\n expression: resource.matchTag('org_id/tag_key_short_name,'tag_value_short_name')\n title: a CEL condition\n - policyId: custom_org_policy\n constraint:\n orgPolicyConstraintCustom:\n customConstraint:\n name: organizations/123456789/customConstraints/custom.disableGkeAutoUpgrade\n displayName: Disable GKE auto upgrade\n description: Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced.\n actionType: ALLOW\n condition: resource.management.autoUpgrade == false\n methodTypes:\n - CREATE\n - UPDATE\n resourceTypes:\n - container.googleapis.com/NodePool\n policyRules:\n - enforce: true\n condition:\n description: condition description\n expression: resource.matchTagId('tagKeys/key_id','tagValues/value_id')\n title: a CEL condition\n - policySetId: sha_policy_set\n description: set of sha policies\n policies:\n - policyId: sha_builtin_module\n constraint:\n securityHealthAnalyticsModule:\n moduleName: BIGQUERY_TABLE_CMEK_DISABLED\n moduleEnablementState: ENABLED\n description: enable BIGQUERY_TABLE_CMEK_DISABLED\n - policyId: sha_custom_module\n constraint:\n securityHealthAnalyticsCustomModule:\n displayName: custom_SHA_policy\n config:\n predicate:\n expression: resource.rotationPeriod \u003e duration('2592000s')\n customOutput:\n properties:\n - name: duration\n valueExpression:\n expression: resource.rotationPeriod\n resourceSelector:\n resourceTypes:\n - cloudkms.googleapis.com/CryptoKey\n severity: LOW\n description: Custom Module\n recommendation: Testing custom modules\n moduleEnablementState: ENABLED\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n## Import\n\nPosture can be imported using any of these accepted formats:\n\n* `{{parent}}/locations/{{location}}/postures/{{posture_id}}`\n\nWhen using the `pulumi import` command, Posture can be imported using one of the formats above. 
For example:\n\n```sh\n$ pulumi import gcp:securityposture/posture:Posture default {{parent}}/locations/{{location}}/postures/{{posture_id}}\n```\n\n", "properties": { @@ -257274,6 +259191,49 @@ "type": "object" } }, + "gcp:certificatemanager/getCertificates:getCertificates": { + "description": "List all certificates within Google Certificate Manager for a given project, region or filter.\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst default = gcp.certificatemanager.getCertificates({});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.certificatemanager.get_certificates()\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = Gcp.CertificateManager.GetCertificates.Invoke();\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/certificatemanager\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := certificatemanager.GetCertificates(ctx, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;\nimport com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var default = CertificatemanagerFunctions.getCertificates();\n\n }\n}\n```\n```yaml\nvariables:\n default:\n fn::invoke:\n Function: gcp:certificatemanager:getCertificates\n Arguments: {}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### With A Filter\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst default = gcp.certificatemanager.getCertificates({\n filter: \"name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ndefault = gcp.certificatemanager.get_certificates(filter=\"name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var @default = Gcp.CertificateManager.GetCertificates.Invoke(new()\n {\n Filter = \"name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/certificatemanager\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := certificatemanager.GetCertificates(ctx, \u0026certificatemanager.GetCertificatesArgs{\n\t\t\tFilter: pulumi.StringRef(\"name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage 
generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;\nimport com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var default = CertificatemanagerFunctions.getCertificates(GetCertificatesArgs.builder()\n .filter(\"name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*\")\n .build());\n\n }\n}\n```\n```yaml\nvariables:\n default:\n fn::invoke:\n Function: gcp:certificatemanager:getCertificates\n Arguments:\n filter: name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*\n```\n\u003c!--End PulumiCodeChooser --\u003e\n", + "inputs": { + "description": "A collection of arguments for invoking getCertificates.\n", + "properties": { + "filter": { + "type": "string", + "description": "Filter expression to restrict the certificates returned.\n" + }, + "region": { + "type": "string", + "description": "The region in which the resource belongs. If it is not provided, `GLOBAL` is used.\n" + } + }, + "type": "object" + }, + "outputs": { + "description": "A collection of values returned by getCertificates.\n", + "properties": { + "certificates": { + "items": { + "$ref": "#/types/gcp:certificatemanager/getCertificatesCertificate:getCertificatesCertificate" + }, + "type": "array" + }, + "filter": { + "type": "string" + }, + "id": { + "description": "The provider-assigned unique ID for this managed resource.\n", + "type": "string" + }, + "region": { + "type": "string" + } + }, + "required": [ + "certificates", + "id" + ], + "type": "object" + } + }, "gcp:cloudasset/getResourcesSearchAll:getResourcesSearchAll": { "description": "## Example Usage\n\n### Searching For All Projects In An Org\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst projects = gcp.cloudasset.getResourcesSearchAll({\n scope: \"organizations/0123456789\",\n assetTypes: [\"cloudresourcemanager.googleapis.com/Project\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nprojects = gcp.cloudasset.get_resources_search_all(scope=\"organizations/0123456789\",\n asset_types=[\"cloudresourcemanager.googleapis.com/Project\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var projects = Gcp.CloudAsset.GetResourcesSearchAll.Invoke(new()\n {\n Scope = \"organizations/0123456789\",\n AssetTypes = new[]\n {\n \"cloudresourcemanager.googleapis.com/Project\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudasset\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudasset.LookupResourcesSearchAll(ctx, \u0026cloudasset.LookupResourcesSearchAllArgs{\n\t\t\tScope: \"organizations/0123456789\",\n\t\t\tAssetTypes: []string{\n\t\t\t\t\"cloudresourcemanager.googleapis.com/Project\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage 
generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudasset.CloudassetFunctions;\nimport com.pulumi.gcp.cloudasset.inputs.GetResourcesSearchAllArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var projects = CloudassetFunctions.getResourcesSearchAll(GetResourcesSearchAllArgs.builder()\n .scope(\"organizations/0123456789\")\n .assetTypes(\"cloudresourcemanager.googleapis.com/Project\")\n .build());\n\n }\n}\n```\n```yaml\nvariables:\n projects:\n fn::invoke:\n Function: gcp:cloudasset:getResourcesSearchAll\n Arguments:\n scope: organizations/0123456789\n assetTypes:\n - cloudresourcemanager.googleapis.com/Project\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Searching For All Projects With CloudBuild API Enabled\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst cloudBuildProjects = gcp.cloudasset.getResourcesSearchAll({\n scope: \"organizations/0123456789\",\n assetTypes: [\"serviceusage.googleapis.com/Service\"],\n query: \"displayName:cloudbuild.googleapis.com AND state:ENABLED\",\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\ncloud_build_projects = gcp.cloudasset.get_resources_search_all(scope=\"organizations/0123456789\",\n asset_types=[\"serviceusage.googleapis.com/Service\"],\n query=\"displayName:cloudbuild.googleapis.com AND state:ENABLED\")\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var cloudBuildProjects = Gcp.CloudAsset.GetResourcesSearchAll.Invoke(new()\n {\n Scope = \"organizations/0123456789\",\n AssetTypes = new[]\n {\n \"serviceusage.googleapis.com/Service\",\n },\n Query = \"displayName:cloudbuild.googleapis.com AND state:ENABLED\",\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudasset\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudasset.LookupResourcesSearchAll(ctx, \u0026cloudasset.LookupResourcesSearchAllArgs{\n\t\t\tScope: \"organizations/0123456789\",\n\t\t\tAssetTypes: []string{\n\t\t\t\t\"serviceusage.googleapis.com/Service\",\n\t\t\t},\n\t\t\tQuery: pulumi.StringRef(\"displayName:cloudbuild.googleapis.com AND state:ENABLED\"),\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudasset.CloudassetFunctions;\nimport com.pulumi.gcp.cloudasset.inputs.GetResourcesSearchAllArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var cloudBuildProjects = CloudassetFunctions.getResourcesSearchAll(GetResourcesSearchAllArgs.builder()\n .scope(\"organizations/0123456789\")\n 
.assetTypes(\"serviceusage.googleapis.com/Service\")\n .query(\"displayName:cloudbuild.googleapis.com AND state:ENABLED\")\n .build());\n\n }\n}\n```\n```yaml\nvariables:\n cloudBuildProjects:\n fn::invoke:\n Function: gcp:cloudasset:getResourcesSearchAll\n Arguments:\n scope: organizations/0123456789\n assetTypes:\n - serviceusage.googleapis.com/Service\n query: displayName:cloudbuild.googleapis.com AND state:ENABLED\n```\n\u003c!--End PulumiCodeChooser --\u003e\n\n### Searching For All Service Accounts In A Project\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst projectServiceAccounts = gcp.cloudasset.getResourcesSearchAll({\n scope: \"projects/my-project-id\",\n assetTypes: [\"iam.googleapis.com/ServiceAccount\"],\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nproject_service_accounts = gcp.cloudasset.get_resources_search_all(scope=\"projects/my-project-id\",\n asset_types=[\"iam.googleapis.com/ServiceAccount\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var projectServiceAccounts = Gcp.CloudAsset.GetResourcesSearchAll.Invoke(new()\n {\n Scope = \"projects/my-project-id\",\n AssetTypes = new[]\n {\n \"iam.googleapis.com/ServiceAccount\",\n },\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudasset\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := cloudasset.LookupResourcesSearchAll(ctx, \u0026cloudasset.LookupResourcesSearchAllArgs{\n\t\t\tScope: \"projects/my-project-id\",\n\t\t\tAssetTypes: []string{\n\t\t\t\t\"iam.googleapis.com/ServiceAccount\",\n\t\t\t},\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.cloudasset.CloudassetFunctions;\nimport com.pulumi.gcp.cloudasset.inputs.GetResourcesSearchAllArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var projectServiceAccounts = CloudassetFunctions.getResourcesSearchAll(GetResourcesSearchAllArgs.builder()\n .scope(\"projects/my-project-id\")\n .assetTypes(\"iam.googleapis.com/ServiceAccount\")\n .build());\n\n }\n}\n```\n```yaml\nvariables:\n projectServiceAccounts:\n fn::invoke:\n Function: gcp:cloudasset:getResourcesSearchAll\n Arguments:\n scope: projects/my-project-id\n assetTypes:\n - iam.googleapis.com/ServiceAccount\n```\n\u003c!--End PulumiCodeChooser --\u003e\n", "inputs": { @@ -261568,7 +263528,7 @@ "type": "string" }, "currentStatus": { - "description": "The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`,\n", + "description": "The current status of the instance. 
This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).\n", "type": "string" }, "deletionProtection": { @@ -269338,6 +271298,12 @@ }, "workloadIdentityPoolProviderId": { "type": "string" + }, + "x509s": { + "items": { + "$ref": "#/types/gcp:iam/getWorkloadIdentityPoolProviderX509:getWorkloadIdentityPoolProviderX509" + }, + "type": "array" } }, "required": [ @@ -269353,6 +271319,7 @@ "state", "workloadIdentityPoolId", "workloadIdentityPoolProviderId", + "x509s", "id" ], "type": "object" @@ -270000,6 +271967,134 @@ "type": "object" } }, + "gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion": { + "description": "Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see\n[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)\nand\n[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myKeyRing = gcp.kms.getKMSKeyRing({\n name: \"my-key-ring\",\n location: \"us-central1\",\n});\nconst myCryptoKey = myKeyRing.then(myKeyRing =\u003e gcp.kms.getKMSCryptoKey({\n name: \"my-crypto-key\",\n keyRing: myKeyRing.id,\n}));\nconst myCryptoKeyLatestVersion = gcp.kms.getCryptoKeyLatestVersion({\n cryptoKey: myKey.id,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_key_ring = gcp.kms.get_kms_key_ring(name=\"my-key-ring\",\n location=\"us-central1\")\nmy_crypto_key = gcp.kms.get_kms_crypto_key(name=\"my-crypto-key\",\n key_ring=my_key_ring.id)\nmy_crypto_key_latest_version = gcp.kms.get_crypto_key_latest_version(crypto_key=my_key[\"id\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myKeyRing = Gcp.Kms.GetKMSKeyRing.Invoke(new()\n {\n Name = \"my-key-ring\",\n Location = \"us-central1\",\n });\n\n var myCryptoKey = Gcp.Kms.GetKMSCryptoKey.Invoke(new()\n {\n Name = \"my-crypto-key\",\n KeyRing = myKeyRing.Apply(getKMSKeyRingResult =\u003e getKMSKeyRingResult.Id),\n });\n\n var myCryptoKeyLatestVersion = Gcp.Kms.GetCryptoKeyLatestVersion.Invoke(new()\n {\n CryptoKey = myKey.Id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tmyKeyRing, err := kms.GetKMSKeyRing(ctx, \u0026kms.GetKMSKeyRingArgs{\n\t\t\tName: \"my-key-ring\",\n\t\t\tLocation: \"us-central1\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = kms.GetKMSCryptoKey(ctx, \u0026kms.GetKMSCryptoKeyArgs{\n\t\t\tName: \"my-crypto-key\",\n\t\t\tKeyRing: myKeyRing.Id,\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = kms.GetCryptoKeyLatestVersion(ctx, \u0026kms.GetCryptoKeyLatestVersionArgs{\n\t\t\tCryptoKey: myKey.Id,\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport 
com.pulumi.gcp.kms.KmsFunctions;\nimport com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;\nimport com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;\nimport com.pulumi.gcp.kms.inputs.GetCryptoKeyLatestVersionArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()\n .name(\"my-key-ring\")\n .location(\"us-central1\")\n .build());\n\n final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()\n .name(\"my-crypto-key\")\n .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -\u003e getKMSKeyRingResult.id()))\n .build());\n\n final var myCryptoKeyLatestVersion = KmsFunctions.getCryptoKeyLatestVersion(GetCryptoKeyLatestVersionArgs.builder()\n .cryptoKey(myKey.id())\n .build());\n\n }\n}\n```\n```yaml\nvariables:\n myKeyRing:\n fn::invoke:\n Function: gcp:kms:getKMSKeyRing\n Arguments:\n name: my-key-ring\n location: us-central1\n myCryptoKey:\n fn::invoke:\n Function: gcp:kms:getKMSCryptoKey\n Arguments:\n name: my-crypto-key\n keyRing: ${myKeyRing.id}\n myCryptoKeyLatestVersion:\n fn::invoke:\n Function: gcp:kms:getCryptoKeyLatestVersion\n Arguments:\n cryptoKey: ${myKey.id}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n", + "inputs": { + "description": "A collection of arguments for invoking getCryptoKeyLatestVersion.\n", + "properties": { + "cryptoKey": { + "type": "string", + "description": "The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the \n`gcp.kms.CryptoKey` resource/datasource.\n", + "willReplaceOnChanges": true + }, + "filter": { + "type": "string", + "description": "The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering.\n\nExample filter values if filtering on state.\n\n* `\"state:ENABLED\"` will retrieve the latest cryptoKeyVersion that has the state \"ENABLED\".\n\n[See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering)\n" + } + }, + "type": "object", + "required": [ + "cryptoKey" + ] + }, + "outputs": { + "description": "A collection of values returned by getCryptoKeyLatestVersion.\n", + "properties": { + "algorithm": { + "description": "The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.\n", + "type": "string" + }, + "cryptoKey": { + "type": "string" + }, + "filter": { + "type": "string" + }, + "id": { + "description": "The provider-assigned unique ID for this managed resource.\n", + "type": "string" + }, + "name": { + "type": "string" + }, + "protectionLevel": { + "description": "The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protection_level reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs.\n", + "type": "string" + }, + "publicKeys": { + "description": "If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. 
Structure is documented below.\n", + "items": { + "$ref": "#/types/gcp:kms/getCryptoKeyLatestVersionPublicKey:getCryptoKeyLatestVersionPublicKey" + }, + "type": "array" + }, + "state": { + "description": "The current state of the latest CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs.\n", + "type": "string" + }, + "version": { + "type": "integer" + } + }, + "required": [ + "algorithm", + "cryptoKey", + "name", + "protectionLevel", + "publicKeys", + "state", + "version", + "id" + ], + "type": "object" + } + }, + "gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions": { + "description": "Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see\n[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)\nand\n[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).\n\n\n## Example Usage\n\n\u003c!--Start PulumiCodeChooser --\u003e\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as gcp from \"@pulumi/gcp\";\n\nconst myKeyRing = gcp.kms.getKMSKeyRing({\n name: \"my-key-ring\",\n location: \"us-central1\",\n});\nconst myCryptoKey = myKeyRing.then(myKeyRing =\u003e gcp.kms.getKMSCryptoKey({\n name: \"my-crypto-key\",\n keyRing: myKeyRing.id,\n}));\nconst myCryptoKeyVersions = gcp.kms.getCryptoKeyVersions({\n cryptoKey: myKey.id,\n});\n```\n```python\nimport pulumi\nimport pulumi_gcp as gcp\n\nmy_key_ring = gcp.kms.get_kms_key_ring(name=\"my-key-ring\",\n location=\"us-central1\")\nmy_crypto_key = gcp.kms.get_kms_crypto_key(name=\"my-crypto-key\",\n key_ring=my_key_ring.id)\nmy_crypto_key_versions = gcp.kms.get_crypto_key_versions(crypto_key=my_key[\"id\"])\n```\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing Gcp = Pulumi.Gcp;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myKeyRing = Gcp.Kms.GetKMSKeyRing.Invoke(new()\n {\n Name = \"my-key-ring\",\n Location = \"us-central1\",\n });\n\n var myCryptoKey = Gcp.Kms.GetKMSCryptoKey.Invoke(new()\n {\n Name = \"my-crypto-key\",\n KeyRing = myKeyRing.Apply(getKMSKeyRingResult =\u003e getKMSKeyRingResult.Id),\n });\n\n var myCryptoKeyVersions = Gcp.Kms.GetCryptoKeyVersions.Invoke(new()\n {\n CryptoKey = myKey.Id,\n });\n\n});\n```\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tmyKeyRing, err := kms.GetKMSKeyRing(ctx, \u0026kms.GetKMSKeyRingArgs{\n\t\t\tName: \"my-key-ring\",\n\t\t\tLocation: \"us-central1\",\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = kms.GetKMSCryptoKey(ctx, \u0026kms.GetKMSCryptoKeyArgs{\n\t\t\tName: \"my-crypto-key\",\n\t\t\tKeyRing: myKeyRing.Id,\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = kms.GetCryptoKeyVersions(ctx, \u0026kms.GetCryptoKeyVersionsArgs{\n\t\t\tCryptoKey: myKey.Id,\n\t\t}, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n```\n```java\npackage generated_program;\n\nimport com.pulumi.Context;\nimport com.pulumi.Pulumi;\nimport com.pulumi.core.Output;\nimport com.pulumi.gcp.kms.KmsFunctions;\nimport com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;\nimport com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;\nimport 
com.pulumi.gcp.kms.inputs.GetCryptoKeyVersionsArgs;\nimport java.util.List;\nimport java.util.ArrayList;\nimport java.util.Map;\nimport java.io.File;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\n\npublic class App {\n public static void main(String[] args) {\n Pulumi.run(App::stack);\n }\n\n public static void stack(Context ctx) {\n final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()\n .name(\"my-key-ring\")\n .location(\"us-central1\")\n .build());\n\n final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()\n .name(\"my-crypto-key\")\n .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -\u003e getKMSKeyRingResult.id()))\n .build());\n\n final var myCryptoKeyVersions = KmsFunctions.getCryptoKeyVersions(GetCryptoKeyVersionsArgs.builder()\n .cryptoKey(myKey.id())\n .build());\n\n }\n}\n```\n```yaml\nvariables:\n myKeyRing:\n fn::invoke:\n Function: gcp:kms:getKMSKeyRing\n Arguments:\n name: my-key-ring\n location: us-central1\n myCryptoKey:\n fn::invoke:\n Function: gcp:kms:getKMSCryptoKey\n Arguments:\n name: my-crypto-key\n keyRing: ${myKeyRing.id}\n myCryptoKeyVersions:\n fn::invoke:\n Function: gcp:kms:getCryptoKeyVersions\n Arguments:\n cryptoKey: ${myKey.id}\n```\n\u003c!--End PulumiCodeChooser --\u003e\n", + "inputs": { + "description": "A collection of arguments for invoking getCryptoKeyVersions.\n", + "properties": { + "cryptoKey": { + "type": "string", + "description": "The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the \n`gcp.kms.CryptoKey` resource/datasource.\n", + "willReplaceOnChanges": true + }, + "filter": { + "type": "string", + "description": "The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering.\n\nExample filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions.\n\n* `\"name:my-key-\"` will retrieve cryptoKeyVersions that contain \"my-key-\" anywhere in their name.\n* `\"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1\"` will only retrieve a key with that exact name.\n\n[See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering)\n" + } + }, + "type": "object", + "required": [ + "cryptoKey" + ] + }, + "outputs": { + "description": "A collection of values returned by getCryptoKeyVersions.\n", + "properties": { + "cryptoKey": { + "type": "string" + }, + "filter": { + "type": "string" + }, + "id": { + "description": "The provider-assigned unique ID for this managed resource.\n", + "type": "string" + }, + "publicKeys": { + "items": { + "$ref": "#/types/gcp:kms/getCryptoKeyVersionsPublicKey:getCryptoKeyVersionsPublicKey" + }, + "type": "array" + }, + "versions": { + "description": "A list of all the retrieved crypto key versions from the provided crypto key. This list is influenced by the provided filter argument.\n", + "items": { + "$ref": "#/types/gcp:kms/getCryptoKeyVersionsVersion:getCryptoKeyVersionsVersion" + }, + "type": "array" + } + }, + "required": [ + "cryptoKey", + "publicKeys", + "versions", + "id" + ], + "type": "object" + } + }, "gcp:kms/getCryptoKeys:getCryptoKeys": { "description": "Provides access to all Google Cloud Platform KMS CryptoKeys in a given KeyRing. 
For more information see\n[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key)\nand\n[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys).\n\nA CryptoKey is an interface to key material which can be used to encrypt and decrypt data. A CryptoKey belongs to a\nGoogle Cloud KMS KeyRing.\n\n", "inputs": { @@ -272193,6 +274288,12 @@ }, "secret": true, "type": "object" + }, + "tags": { + "additionalProperties": { + "type": "string" + }, + "type": "object" } }, "required": [ @@ -272205,6 +274306,7 @@ "name", "number", "orgId", + "tags", "pulumiLabels", "id" ], diff --git a/provider/go.mod b/provider/go.mod index 333129a51f..15f0a37066 100644 --- a/provider/go.mod +++ b/provider/go.mod @@ -13,7 +13,7 @@ require ( github.com/pulumi/pulumi/pkg/v3 v3.130.0 github.com/pulumi/pulumi/sdk/v3 v3.130.0 github.com/stretchr/testify v1.9.0 - google.golang.org/api v0.191.0 + google.golang.org/api v0.193.0 sourcegraph.com/sourcegraph/appdash v0.0.0-20211028080628-e2786a622600 ) @@ -26,17 +26,17 @@ replace ( require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect cel.dev/expr v0.15.0 // indirect - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.8.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect - cloud.google.com/go/bigtable v1.29.0 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/auth v0.9.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/bigtable v1.30.0 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.12 // indirect + cloud.google.com/go/iam v1.1.13 // indirect cloud.google.com/go/kms v1.18.4 // indirect cloud.google.com/go/logging v1.11.0 // indirect - cloud.google.com/go/longrunning v0.5.11 // indirect - cloud.google.com/go/monitoring v1.20.3 // indirect - cloud.google.com/go/storage v1.42.0 // indirect + cloud.google.com/go/longrunning v0.5.12 // indirect + cloud.google.com/go/monitoring v1.20.4 // indirect + cloud.google.com/go/storage v1.43.0 // indirect dario.cat/mergo v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect @@ -45,7 +45,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/BurntSushi/toml v1.2.1 // indirect - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0 // indirect + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect @@ -111,7 +111,7 @@ require ( github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/glog v1.2.0 // indirect + github.com/golang/glog v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.6.0 // indirect @@ -241,34 +241,34 @@ require ( github.com/yuin/goldmark v1.7.4 // indirect github.com/zclconf/go-cty v1.14.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/sdk v1.24.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect go.uber.org/atomic v1.9.0 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect gocloud.dev v0.37.0 // indirect gocloud.dev/secrets/hashivault v0.37.0 // indirect - golang.org/x/crypto v0.25.0 // indirect + golang.org/x/crypto v0.26.0 // indirect golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect golang.org/x/mod v0.18.0 // indirect - golang.org/x/net v0.27.0 // indirect + golang.org/x/net v0.28.0 // indirect golang.org/x/oauth2 v0.22.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/term v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.22.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.64.1 // indirect + google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/provider/go.sum b/provider/go.sum index 73a8270d71..d846454485 100644 --- a/provider/go.sum +++ b/provider/go.sum @@ -47,8 +47,8 @@ cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5x cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod 
h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= @@ -165,10 +165,10 @@ cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav cloud.google.com/go/assuredworkloads v1.11.2/go.mod h1:O1dfr+oZJMlE6mw0Bp0P1KZSlj5SghMBvTpZqIcUAW4= cloud.google.com/go/assuredworkloads v1.11.3/go.mod h1:vEjfTKYyRUaIeA0bsGJceFV2JKpVRgyG2op3jfa59Zs= cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= -cloud.google.com/go/auth v0.8.0 h1:y8jUJLl/Fg+qNBWxP/Hox2ezJvjkrPb952PC1p0G6A4= -cloud.google.com/go/auth v0.8.0/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= +cloud.google.com/go/auth v0.9.0 h1:cYhKl1JUhynmxjXfrk4qdPc6Amw7i+GC9VLflgT0p5M= +cloud.google.com/go/auth v0.9.0/go.mod h1:2HsApZBr9zGZhC9QAXsYVYaWk8kNUt37uny+XVKi7wM= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -222,8 +222,8 @@ cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6Pm cloud.google.com/go/bigquery v1.55.0/go.mod h1:9Y5I3PN9kQWuid6183JFhOGOW3GcirA5LpsKCUn+2ec= cloud.google.com/go/bigquery v1.56.0/go.mod h1:KDcsploXTEY7XT3fDQzMUZlpQLHzE4itubHrnmhUrZA= cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug= -cloud.google.com/go/bigtable v1.29.0 h1:2CnFjKPwjpZMZdTi2RpppvxzD80zKzDYrLYEQw/NnAs= -cloud.google.com/go/bigtable v1.29.0/go.mod h1:5p909nNdWaNUcWs6KGZO8mI5HUovstlmrIi7+eA5PTQ= +cloud.google.com/go/bigtable v1.30.0 h1:w+N3/WcCDVuKAMvBCD734795ElyjRVaOgOihBRvnWPM= +cloud.google.com/go/bigtable v1.30.0/go.mod h1:VVl6B9pDrmTmSP5KD65KU/tWk3aCHksaNnVt471BN2o= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= @@ -602,8 +602,8 @@ cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+K cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= +cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -676,8 +676,8 @@ cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHS cloud.google.com/go/longrunning v0.5.2/go.mod 
h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= cloud.google.com/go/longrunning v0.5.3/go.mod h1:y/0ga59EYu58J6SHmmQOvekvND2qODbu8ywBBW7EK7Y= cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= -cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= +cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= +cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -729,8 +729,8 @@ cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67m cloud.google.com/go/monitoring v1.16.1/go.mod h1:6HsxddR+3y9j+o/cMJH6q/KJ/CBTvM/38L/1m7bTRJ4= cloud.google.com/go/monitoring v1.16.2/go.mod h1:B44KGwi4ZCF8Rk/5n+FWeispDXoKSk9oss2QNlXJBgc= cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= -cloud.google.com/go/monitoring v1.20.3 h1:v/7MXFxYrhXLEZ9sSfwXdlTLLB/xrU7xTyYjY5acynQ= -cloud.google.com/go/monitoring v1.20.3/go.mod h1:GPIVIdNznIdGqEjtRKQWTLcUeRnPjZW85szouimiczU= +cloud.google.com/go/monitoring v1.20.4 h1:zwcViK7mT9SV0kzKqLOI3spRadvsmvw/R9z1MHNeC0E= +cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28+Cl717aTXG4c= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -1025,8 +1025,8 @@ cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= -cloud.google.com/go/storage v1.42.0 h1:4QtGpplCVt1wz6g5o1ifXd656P5z+yNgzdw1tVfp0cU= -cloud.google.com/go/storage v1.42.0/go.mod h1:HjMXRFq65pGKFn6hxj6x3HCyR41uSB72Z0SO/Vn6JFQ= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= @@ -1179,8 +1179,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0 h1:vRKCLiR3faPmXAoqSdwXLv28/kygggzaKXzgdm6GXhg= 
-github.com/GoogleCloudPlatform/declarative-resource-client-library v1.71.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0 h1:VodSRLhOrb8hhRbPre275EreP4vTiaejdBcvd2MCtX4= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.72.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= @@ -1480,8 +1480,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -2150,20 +2150,20 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8= -go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= +go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2211,8 +2211,8 @@ golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2358,8 +2358,8 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2531,8 +2531,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2553,8 +2553,8 @@ golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2576,8 +2576,8 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2745,8 +2745,8 @@ google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvy google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/api v0.139.0/go.mod h1:CVagp6Eekz9CjGZ718Z+sloknzkDJE7Vc1Ckj9+viBk= google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= -google.golang.org/api v0.191.0 h1:cJcF09Z+4HAB2t5qTQM1ZtfL/PemsLFkcFG67qq2afk= -google.golang.org/api 
v0.191.0/go.mod h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E= +google.golang.org/api v0.193.0 h1:eOGDoJFsLU+HpCBaDJex2fWiYujAw9KbXgpOAMePoUs= +google.golang.org/api v0.193.0/go.mod h1:Po3YMV1XZx+mTku3cfJrlIYR03wiGrCOsdpC67hjZvw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2909,8 +2909,8 @@ google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= +google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 h1:oLiyxGgE+rt22duwci1+TG7bg2/L1LQsXwfjPlmuJA0= +google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -2928,8 +2928,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go. google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg= google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= @@ -2950,8 +2950,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go. 
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -3002,8 +3002,8 @@ google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSs google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/provider/resources.go b/provider/resources.go index 45a80cfd31..a988ec59ad 100644 --- a/provider/resources.go +++ b/provider/resources.go @@ -463,7 +463,7 @@ func Provider() tfbridge.ProviderInfo { shimv2.NewProvider(gcpProvider.Provider(), shimv2.WithPlanResourceChange(func(_ string) bool { return true }), ), - gcpPFProvider.New(version.Version)) // this probably should be TF version but it does not seem to matter + gcpPFProvider.New()) // We should only run the validation once to avoid duplicating the reported errors. var credentialsValidationRun atomic.Bool diff --git a/sdk/dotnet/Alloydb/Cluster.cs b/sdk/dotnet/Alloydb/Cluster.cs index 3bd837d9ff..0429cf9606 100644 --- a/sdk/dotnet/Alloydb/Cluster.cs +++ b/sdk/dotnet/Alloydb/Cluster.cs @@ -303,6 +303,7 @@ public partial class Cluster : global::Pulumi.CustomResource /// Policy to determine if the cluster should be deleted forcefully. /// Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. /// Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. 
This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + /// Possible values: DEFAULT, FORCE /// [Output("deletionPolicy")] public Output DeletionPolicy { get; private set; } = null!; @@ -450,6 +451,20 @@ public partial class Cluster : global::Pulumi.CustomResource [Output("state")] public Output State { get; private set; } = null!; + /// + /// The subscrition type of cluster. + /// Possible values are: `TRIAL`, `STANDARD`. + /// + [Output("subscriptionType")] + public Output SubscriptionType { get; private set; } = null!; + + /// + /// Contains information and all metadata related to TRIAL clusters. + /// Structure is documented below. + /// + [Output("trialMetadatas")] + public Output> TrialMetadatas { get; private set; } = null!; + /// /// The system-generated UID of the resource. /// @@ -562,6 +577,7 @@ public InputMap Annotations /// Policy to determine if the cluster should be deleted forcefully. /// Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. /// Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + /// Possible values: DEFAULT, FORCE /// [Input("deletionPolicy")] public Input? DeletionPolicy { get; set; } @@ -664,6 +680,13 @@ public InputMap Labels [Input("secondaryConfig")] public Input? SecondaryConfig { get; set; } + /// + /// The subscrition type of cluster. + /// Possible values are: `TRIAL`, `STANDARD`. + /// + [Input("subscriptionType")] + public Input? SubscriptionType { get; set; } + public ClusterArgs() { } @@ -753,6 +776,7 @@ public InputList ContinuousBackupInfo /// Policy to determine if the cluster should be deleted forcefully. /// Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. /// Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + /// Possible values: DEFAULT, FORCE /// [Input("deletionPolicy")] public Input? DeletionPolicy { get; set; } @@ -943,6 +967,26 @@ public InputMap PulumiLabels [Input("state")] public Input? State { get; set; } + /// + /// The subscrition type of cluster. + /// Possible values are: `TRIAL`, `STANDARD`. + /// + [Input("subscriptionType")] + public Input? SubscriptionType { get; set; } + + [Input("trialMetadatas")] + private InputList? _trialMetadatas; + + /// + /// Contains information and all metadata related to TRIAL clusters. + /// Structure is documented below. + /// + public InputList TrialMetadatas + { + get => _trialMetadatas ?? (_trialMetadatas = new InputList()); + set => _trialMetadatas = value; + } + /// /// The system-generated UID of the resource. 
/// diff --git a/sdk/dotnet/Alloydb/Inputs/ClusterTrialMetadataArgs.cs b/sdk/dotnet/Alloydb/Inputs/ClusterTrialMetadataArgs.cs new file mode 100644 index 0000000000..457a9c02ea --- /dev/null +++ b/sdk/dotnet/Alloydb/Inputs/ClusterTrialMetadataArgs.cs @@ -0,0 +1,44 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Alloydb.Inputs +{ + + public sealed class ClusterTrialMetadataArgs : global::Pulumi.ResourceArgs + { + /// + /// End time of the trial cluster. + /// + [Input("endTime")] + public Input? EndTime { get; set; } + + /// + /// Grace end time of the trial cluster. + /// + [Input("graceEndTime")] + public Input? GraceEndTime { get; set; } + + /// + /// Start time of the trial cluster. + /// + [Input("startTime")] + public Input? StartTime { get; set; } + + /// + /// Upgrade time of the trial cluster to standard cluster. + /// + [Input("upgradeTime")] + public Input? UpgradeTime { get; set; } + + public ClusterTrialMetadataArgs() + { + } + public static new ClusterTrialMetadataArgs Empty => new ClusterTrialMetadataArgs(); + } +} diff --git a/sdk/dotnet/Alloydb/Inputs/ClusterTrialMetadataGetArgs.cs b/sdk/dotnet/Alloydb/Inputs/ClusterTrialMetadataGetArgs.cs new file mode 100644 index 0000000000..cc57c69bdb --- /dev/null +++ b/sdk/dotnet/Alloydb/Inputs/ClusterTrialMetadataGetArgs.cs @@ -0,0 +1,44 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Alloydb.Inputs +{ + + public sealed class ClusterTrialMetadataGetArgs : global::Pulumi.ResourceArgs + { + /// + /// End time of the trial cluster. + /// + [Input("endTime")] + public Input? EndTime { get; set; } + + /// + /// Grace end time of the trial cluster. + /// + [Input("graceEndTime")] + public Input? GraceEndTime { get; set; } + + /// + /// Start time of the trial cluster. + /// + [Input("startTime")] + public Input? StartTime { get; set; } + + /// + /// Upgrade time of the trial cluster to standard cluster. + /// + [Input("upgradeTime")] + public Input? UpgradeTime { get; set; } + + public ClusterTrialMetadataGetArgs() + { + } + public static new ClusterTrialMetadataGetArgs Empty => new ClusterTrialMetadataGetArgs(); + } +} diff --git a/sdk/dotnet/Alloydb/Outputs/ClusterTrialMetadata.cs b/sdk/dotnet/Alloydb/Outputs/ClusterTrialMetadata.cs new file mode 100644 index 0000000000..2eacaf480c --- /dev/null +++ b/sdk/dotnet/Alloydb/Outputs/ClusterTrialMetadata.cs @@ -0,0 +1,49 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Alloydb.Outputs +{ + + [OutputType] + public sealed class ClusterTrialMetadata + { + /// + /// End time of the trial cluster. + /// + public readonly string? EndTime; + /// + /// Grace end time of the trial cluster. 
+ /// + public readonly string? GraceEndTime; + /// + /// Start time of the trial cluster. + /// + public readonly string? StartTime; + /// + /// Upgrade time of the trial cluster to standard cluster. + /// + public readonly string? UpgradeTime; + + [OutputConstructor] + private ClusterTrialMetadata( + string? endTime, + + string? graceEndTime, + + string? startTime, + + string? upgradeTime) + { + EndTime = endTime; + GraceEndTime = graceEndTime; + StartTime = startTime; + UpgradeTime = upgradeTime; + } + } +} diff --git a/sdk/dotnet/AssuredWorkloads/Workload.cs b/sdk/dotnet/AssuredWorkloads/Workload.cs index 30e4691cda..b2dcf0fd33 100644 --- a/sdk/dotnet/AssuredWorkloads/Workload.cs +++ b/sdk/dotnet/AssuredWorkloads/Workload.cs @@ -41,7 +41,7 @@ namespace Pulumi.Gcp.AssuredWorkloads /// { /// new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs /// { - /// DisplayName = "folder-display-name", + /// DisplayName = "{{name}}", /// ResourceType = "CONSUMER_FOLDER", /// }, /// new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs @@ -110,6 +110,56 @@ namespace Pulumi.Gcp.AssuredWorkloads /// /// }); /// ``` + /// ### Split_billing_partner_workload + /// A Split billing partner test of the assuredworkloads api + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var primary = new Gcp.AssuredWorkloads.Workload("primary", new() + /// { + /// ComplianceRegime = "ASSURED_WORKLOADS_FOR_PARTNERS", + /// DisplayName = "display", + /// Location = "europe-west8", + /// Organization = "123456789", + /// BillingAccount = "billingAccounts/000000-0000000-0000000-000000", + /// Partner = "SOVEREIGN_CONTROLS_BY_PSN", + /// PartnerPermissions = new Gcp.AssuredWorkloads.Inputs.WorkloadPartnerPermissionsArgs + /// { + /// AssuredWorkloadsMonitoring = true, + /// DataLogsViewer = true, + /// ServiceAccessApprover = true, + /// }, + /// PartnerServicesBillingAccount = "billingAccounts/01BF3F-2C6DE5-30C607", + /// ResourceSettings = new[] + /// { + /// new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs + /// { + /// ResourceType = "CONSUMER_FOLDER", + /// }, + /// new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs + /// { + /// ResourceType = "ENCRYPTION_KEYS_PROJECT", + /// }, + /// new Gcp.AssuredWorkloads.Inputs.WorkloadResourceSettingArgs + /// { + /// ResourceId = "ring", + /// ResourceType = "KEYRING", + /// }, + /// }, + /// ViolationNotificationsEnabled = true, + /// Labels = + /// { + /// { "label-one", "value-one" }, + /// }, + /// }); + /// + /// }); + /// ``` /// /// ## Import /// @@ -139,7 +189,7 @@ public partial class Workload : global::Pulumi.CustomResource public Output BillingAccount { get; private set; } = null!; /// - /// Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + /// Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS /// [Output("complianceRegime")] public Output ComplianceRegime { get; private set; } = null!; @@ -230,7 +280,7 @@ public partial class Workload : global::Pulumi.CustomResource public Output Organization { get; private set; } = null!; /// - /// Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + /// Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM /// [Output("partner")] public Output Partner { get; private set; } = null!; @@ -241,6 +291,12 @@ public partial class Workload : global::Pulumi.CustomResource [Output("partnerPermissions")] public Output PartnerPermissions { get; private set; } = null!; + /// + /// Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + /// + [Output("partnerServicesBillingAccount")] + public Output PartnerServicesBillingAccount { get; private set; } = null!; + /// /// Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} /// @@ -335,7 +391,7 @@ public sealed class WorkloadArgs : global::Pulumi.ResourceArgs public Input? BillingAccount { get; set; } /// - /// Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + /// Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS /// [Input("complianceRegime", required: true)] public Input ComplianceRegime { get; set; } = null!; @@ -390,7 +446,7 @@ public InputMap Labels public Input Organization { get; set; } = null!; /// - /// Optional. Partner regime associated with this workload. 
Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + /// Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM /// [Input("partner")] public Input? Partner { get; set; } @@ -401,6 +457,12 @@ public InputMap Labels [Input("partnerPermissions")] public Input? PartnerPermissions { get; set; } + /// + /// Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + /// + [Input("partnerServicesBillingAccount")] + public Input? PartnerServicesBillingAccount { get; set; } + /// /// Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} /// @@ -440,7 +502,7 @@ public sealed class WorkloadState : global::Pulumi.ResourceArgs public Input? BillingAccount { get; set; } /// - /// Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + /// Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS /// [Input("complianceRegime")] public Input? ComplianceRegime { get; set; } @@ -565,7 +627,7 @@ public InputMap Labels public Input? Organization { get; set; } /// - /// Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + /// Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM /// [Input("partner")] public Input? Partner { get; set; } @@ -576,6 +638,12 @@ public InputMap Labels [Input("partnerPermissions")] public Input? PartnerPermissions { get; set; } + /// + /// Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. 
+ /// + [Input("partnerServicesBillingAccount")] + public Input? PartnerServicesBillingAccount { get; set; } + /// /// Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} /// diff --git a/sdk/dotnet/BackupDisasterRecovery/BackupVault.cs b/sdk/dotnet/BackupDisasterRecovery/BackupVault.cs new file mode 100644 index 0000000000..3ef8667ea8 --- /dev/null +++ b/sdk/dotnet/BackupDisasterRecovery/BackupVault.cs @@ -0,0 +1,593 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BackupDisasterRecovery +{ + /// + /// ## Example Usage + /// + /// ### Backup Dr Backup Vault Full + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var backup_vault_test = new Gcp.BackupDisasterRecovery.BackupVault("backup-vault-test", new() + /// { + /// Location = "us-central1", + /// BackupVaultId = "backup-vault-test", + /// Description = "This is a second backup vault built by Terraform.", + /// BackupMinimumEnforcedRetentionDuration = "100000s", + /// Labels = + /// { + /// { "foo", "bar1" }, + /// { "bar", "baz1" }, + /// }, + /// Annotations = + /// { + /// { "annotations1", "bar1" }, + /// { "annotations2", "baz1" }, + /// }, + /// ForceUpdate = true, + /// ForceDelete = true, + /// AllowMissing = true, + /// }); + /// + /// }); + /// ``` + /// + /// ## Import + /// + /// BackupVault can be imported using any of these accepted formats: + /// + /// * `projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}` + /// + /// * `{{project}}/{{location}}/{{backup_vault_id}}` + /// + /// * `{{location}}/{{backup_vault_id}}` + /// + /// When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: + /// + /// ```sh + /// $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}} + /// ``` + /// + /// ```sh + /// $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{project}}/{{location}}/{{backup_vault_id}} + /// ``` + /// + /// ```sh + /// $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{location}}/{{backup_vault_id}} + /// ``` + /// + [GcpResourceType("gcp:backupdisasterrecovery/backupVault:BackupVault")] + public partial class BackupVault : global::Pulumi.CustomResource + { + /// + /// Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + /// + [Output("allowMissing")] + public Output AllowMissing { get; private set; } = null!; + + /// + /// Optional. User annotations. See https://google.aip.dev/128#annotations + /// Stores small amounts of arbitrary data. + /// **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + /// Please refer to the field `effective_annotations` for all of the annotations present on the resource. 
+ /// + [Output("annotations")] + public Output?> Annotations { get; private set; } = null!; + + /// + /// Output only. The number of backups in this backup vault. + /// + [Output("backupCount")] + public Output BackupCount { get; private set; } = null!; + + /// + /// Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + /// + [Output("backupMinimumEnforcedRetentionDuration")] + public Output BackupMinimumEnforcedRetentionDuration { get; private set; } = null!; + + /// + /// Required. ID of the requesting object. + /// + /// + /// - - - + /// + [Output("backupVaultId")] + public Output BackupVaultId { get; private set; } = null!; + + /// + /// Output only. The time when the instance was created. + /// + [Output("createTime")] + public Output CreateTime { get; private set; } = null!; + + /// + /// Output only. Set to true when there are no backups nested under this resource. + /// + [Output("deletable")] + public Output Deletable { get; private set; } = null!; + + /// + /// Optional. The description of the BackupVault instance (2048 characters or less). + /// + [Output("description")] + public Output Description { get; private set; } = null!; + + [Output("effectiveAnnotations")] + public Output> EffectiveAnnotations { get; private set; } = null!; + + /// + /// All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + /// + [Output("effectiveLabels")] + public Output> EffectiveLabels { get; private set; } = null!; + + /// + /// Optional. Time after which the BackupVault resource is locked. + /// + [Output("effectiveTime")] + public Output EffectiveTime { get; private set; } = null!; + + /// + /// Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + /// + [Output("etag")] + public Output Etag { get; private set; } = null!; + + /// + /// If set, the following restrictions against deletion of the backup vault instance can be overridden: + /// * deletion of a backup vault instance containing no backups, but still containing empty datasources. + /// * deletion of a backup vault instance that is being referenced by an active backup plan. + /// + [Output("forceDelete")] + public Output ForceDelete { get; private set; } = null!; + + /// + /// If set, allow update to extend the minimum enforced retention for backup vault. This overrides + /// the restriction against conflicting retention periods. This conflict may occur when the + /// expiration schedule defined by the associated backup plan is shorter than the minimum + /// retention set by the backup vault. + /// + [Output("forceUpdate")] + public Output ForceUpdate { get; private set; } = null!; + + /// + /// Optional. Resource labels to represent user provided metadata. + /// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + /// Please refer to the field `effective_labels` for all of the labels present on the resource. + /// + [Output("labels")] + public Output?> Labels { get; private set; } = null!; + + /// + /// The GCP location for the backup vault. + /// + [Output("location")] + public Output Location { get; private set; } = null!; + + /// + /// Output only. Identifier. The resource name. + /// + [Output("name")] + public Output Name { get; private set; } = null!; + + /// + /// The ID of the project in which the resource belongs. 
+ /// If it is not provided, the provider project is used. + /// + [Output("project")] + public Output Project { get; private set; } = null!; + + /// + /// The combination of labels configured directly on the resource + /// and default labels configured on the provider. + /// + [Output("pulumiLabels")] + public Output> PulumiLabels { get; private set; } = null!; + + /// + /// Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + /// + [Output("serviceAccount")] + public Output ServiceAccount { get; private set; } = null!; + + /// + /// Output only. The BackupVault resource instance state. + /// Possible values: + /// STATE_UNSPECIFIED + /// CREATING + /// ACTIVE + /// DELETING + /// ERROR + /// + [Output("state")] + public Output State { get; private set; } = null!; + + /// + /// Output only. Total size of the storage used by all backup resources. + /// + [Output("totalStoredBytes")] + public Output TotalStoredBytes { get; private set; } = null!; + + /// + /// Output only. Output only Immutable after resource creation until resource deletion. + /// + [Output("uid")] + public Output Uid { get; private set; } = null!; + + /// + /// Output only. The time when the instance was updated. + /// + [Output("updateTime")] + public Output UpdateTime { get; private set; } = null!; + + + /// + /// Create a BackupVault resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public BackupVault(string name, BackupVaultArgs args, CustomResourceOptions? options = null) + : base("gcp:backupdisasterrecovery/backupVault:BackupVault", name, args ?? new BackupVaultArgs(), MakeResourceOptions(options, "")) + { + } + + private BackupVault(string name, Input id, BackupVaultState? state = null, CustomResourceOptions? options = null) + : base("gcp:backupdisasterrecovery/backupVault:BackupVault", name, state, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + AdditionalSecretOutputs = + { + "effectiveLabels", + "pulumiLabels", + }, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing BackupVault resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// Any extra arguments used during the lookup. + /// A bag of options that control this resource's behavior + public static BackupVault Get(string name, Input id, BackupVaultState? state = null, CustomResourceOptions? options = null) + { + return new BackupVault(name, id, state, options); + } + } + + public sealed class BackupVaultArgs : global::Pulumi.ResourceArgs + { + /// + /// Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + /// + [Input("allowMissing")] + public Input? 
AllowMissing { get; set; } + + [Input("annotations")] + private InputMap? _annotations; + + /// + /// Optional. User annotations. See https://google.aip.dev/128#annotations + /// Stores small amounts of arbitrary data. + /// **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + /// Please refer to the field `effective_annotations` for all of the annotations present on the resource. + /// + public InputMap Annotations + { + get => _annotations ?? (_annotations = new InputMap()); + set => _annotations = value; + } + + /// + /// Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + /// + [Input("backupMinimumEnforcedRetentionDuration", required: true)] + public Input BackupMinimumEnforcedRetentionDuration { get; set; } = null!; + + /// + /// Required. ID of the requesting object. + /// + /// + /// - - - + /// + [Input("backupVaultId", required: true)] + public Input BackupVaultId { get; set; } = null!; + + /// + /// Optional. The description of the BackupVault instance (2048 characters or less). + /// + [Input("description")] + public Input? Description { get; set; } + + /// + /// Optional. Time after which the BackupVault resource is locked. + /// + [Input("effectiveTime")] + public Input? EffectiveTime { get; set; } + + /// + /// If set, the following restrictions against deletion of the backup vault instance can be overridden: + /// * deletion of a backup vault instance containing no backups, but still containing empty datasources. + /// * deletion of a backup vault instance that is being referenced by an active backup plan. + /// + [Input("forceDelete")] + public Input? ForceDelete { get; set; } + + /// + /// If set, allow update to extend the minimum enforced retention for backup vault. This overrides + /// the restriction against conflicting retention periods. This conflict may occur when the + /// expiration schedule defined by the associated backup plan is shorter than the minimum + /// retention set by the backup vault. + /// + [Input("forceUpdate")] + public Input? ForceUpdate { get; set; } + + [Input("labels")] + private InputMap? _labels; + + /// + /// Optional. Resource labels to represent user provided metadata. + /// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + /// Please refer to the field `effective_labels` for all of the labels present on the resource. + /// + public InputMap Labels + { + get => _labels ?? (_labels = new InputMap()); + set => _labels = value; + } + + /// + /// The GCP location for the backup vault. + /// + [Input("location", required: true)] + public Input Location { get; set; } = null!; + + /// + /// The ID of the project in which the resource belongs. + /// If it is not provided, the provider project is used. + /// + [Input("project")] + public Input? Project { get; set; } + + public BackupVaultArgs() + { + } + public static new BackupVaultArgs Empty => new BackupVaultArgs(); + } + + public sealed class BackupVaultState : global::Pulumi.ResourceArgs + { + /// + /// Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + /// + [Input("allowMissing")] + public Input? AllowMissing { get; set; } + + [Input("annotations")] + private InputMap? _annotations; + + /// + /// Optional. User annotations. See https://google.aip.dev/128#annotations + /// Stores small amounts of arbitrary data. 
+ /// **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + /// Please refer to the field `effective_annotations` for all of the annotations present on the resource. + /// + public InputMap Annotations + { + get => _annotations ?? (_annotations = new InputMap()); + set => _annotations = value; + } + + /// + /// Output only. The number of backups in this backup vault. + /// + [Input("backupCount")] + public Input? BackupCount { get; set; } + + /// + /// Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + /// + [Input("backupMinimumEnforcedRetentionDuration")] + public Input? BackupMinimumEnforcedRetentionDuration { get; set; } + + /// + /// Required. ID of the requesting object. + /// + /// + /// - - - + /// + [Input("backupVaultId")] + public Input? BackupVaultId { get; set; } + + /// + /// Output only. The time when the instance was created. + /// + [Input("createTime")] + public Input? CreateTime { get; set; } + + /// + /// Output only. Set to true when there are no backups nested under this resource. + /// + [Input("deletable")] + public Input? Deletable { get; set; } + + /// + /// Optional. The description of the BackupVault instance (2048 characters or less). + /// + [Input("description")] + public Input? Description { get; set; } + + [Input("effectiveAnnotations")] + private InputMap? _effectiveAnnotations; + public InputMap EffectiveAnnotations + { + get => _effectiveAnnotations ?? (_effectiveAnnotations = new InputMap()); + set => _effectiveAnnotations = value; + } + + [Input("effectiveLabels")] + private InputMap? _effectiveLabels; + + /// + /// All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + /// + public InputMap EffectiveLabels + { + get => _effectiveLabels ?? (_effectiveLabels = new InputMap()); + set + { + var emptySecret = Output.CreateSecret(ImmutableDictionary.Create()); + _effectiveLabels = Output.All(value, emptySecret).Apply(v => v[0]); + } + } + + /// + /// Optional. Time after which the BackupVault resource is locked. + /// + [Input("effectiveTime")] + public Input? EffectiveTime { get; set; } + + /// + /// Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + /// + [Input("etag")] + public Input? Etag { get; set; } + + /// + /// If set, the following restrictions against deletion of the backup vault instance can be overridden: + /// * deletion of a backup vault instance containing no backups, but still containing empty datasources. + /// * deletion of a backup vault instance that is being referenced by an active backup plan. + /// + [Input("forceDelete")] + public Input? ForceDelete { get; set; } + + /// + /// If set, allow update to extend the minimum enforced retention for backup vault. This overrides + /// the restriction against conflicting retention periods. This conflict may occur when the + /// expiration schedule defined by the associated backup plan is shorter than the minimum + /// retention set by the backup vault. + /// + [Input("forceUpdate")] + public Input? ForceUpdate { get; set; } + + [Input("labels")] + private InputMap? _labels; + + /// + /// Optional. Resource labels to represent user provided metadata. + /// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+ /// Please refer to the field `effective_labels` for all of the labels present on the resource. + /// + public InputMap Labels + { + get => _labels ?? (_labels = new InputMap()); + set => _labels = value; + } + + /// + /// The GCP location for the backup vault. + /// + [Input("location")] + public Input? Location { get; set; } + + /// + /// Output only. Identifier. The resource name. + /// + [Input("name")] + public Input? Name { get; set; } + + /// + /// The ID of the project in which the resource belongs. + /// If it is not provided, the provider project is used. + /// + [Input("project")] + public Input? Project { get; set; } + + [Input("pulumiLabels")] + private InputMap? _pulumiLabels; + + /// + /// The combination of labels configured directly on the resource + /// and default labels configured on the provider. + /// + public InputMap PulumiLabels + { + get => _pulumiLabels ?? (_pulumiLabels = new InputMap()); + set + { + var emptySecret = Output.CreateSecret(ImmutableDictionary.Create()); + _pulumiLabels = Output.All(value, emptySecret).Apply(v => v[0]); + } + } + + /// + /// Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + /// + [Input("serviceAccount")] + public Input? ServiceAccount { get; set; } + + /// + /// Output only. The BackupVault resource instance state. + /// Possible values: + /// STATE_UNSPECIFIED + /// CREATING + /// ACTIVE + /// DELETING + /// ERROR + /// + [Input("state")] + public Input? State { get; set; } + + /// + /// Output only. Total size of the storage used by all backup resources. + /// + [Input("totalStoredBytes")] + public Input? TotalStoredBytes { get; set; } + + /// + /// Output only. Output only Immutable after resource creation until resource deletion. + /// + [Input("uid")] + public Input? Uid { get; set; } + + /// + /// Output only. The time when the instance was updated. + /// + [Input("updateTime")] + public Input? 
UpdateTime { get; set; } + + public BackupVaultState() + { + } + public static new BackupVaultState Empty => new BackupVaultState(); + } +} diff --git a/sdk/dotnet/BigQuery/DataTransferConfig.cs b/sdk/dotnet/BigQuery/DataTransferConfig.cs index 8caf5e405e..4f7ed8365b 100644 --- a/sdk/dotnet/BigQuery/DataTransferConfig.cs +++ b/sdk/dotnet/BigQuery/DataTransferConfig.cs @@ -77,6 +77,78 @@ namespace Pulumi.Gcp.BigQuery /// /// }); /// ``` + /// ### Bigquerydatatransfer Config Cmek + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var project = Gcp.Organizations.GetProject.Invoke(); + /// + /// var permissions = new Gcp.Projects.IAMMember("permissions", new() + /// { + /// Project = project.Apply(getProjectResult => getProjectResult.ProjectId), + /// Role = "roles/iam.serviceAccountTokenCreator", + /// Member = $"serviceAccount:service-{project.Apply(getProjectResult => getProjectResult.Number)}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com", + /// }); + /// + /// var myDataset = new Gcp.BigQuery.Dataset("my_dataset", new() + /// { + /// DatasetId = "example_dataset", + /// FriendlyName = "foo", + /// Description = "bar", + /// Location = "asia-northeast1", + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// permissions, + /// }, + /// }); + /// + /// var keyRing = new Gcp.Kms.KeyRing("key_ring", new() + /// { + /// Name = "example-keyring", + /// Location = "us", + /// }); + /// + /// var cryptoKey = new Gcp.Kms.CryptoKey("crypto_key", new() + /// { + /// Name = "example-key", + /// KeyRing = keyRing.Id, + /// }); + /// + /// var queryConfigCmek = new Gcp.BigQuery.DataTransferConfig("query_config_cmek", new() + /// { + /// DisplayName = "", + /// Location = "asia-northeast1", + /// DataSourceId = "scheduled_query", + /// Schedule = "first sunday of quarter 00:00", + /// DestinationDatasetId = myDataset.DatasetId, + /// Params = + /// { + /// { "destination_table_name_template", "my_table" }, + /// { "write_disposition", "WRITE_APPEND" }, + /// { "query", "SELECT name FROM tabl WHERE x = 'y'" }, + /// }, + /// EncryptionConfiguration = new Gcp.BigQuery.Inputs.DataTransferConfigEncryptionConfigurationArgs + /// { + /// KmsKeyName = cryptoKey.Id, + /// }, + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// permissions, + /// }, + /// }); + /// + /// }); + /// ``` /// ### Bigquerydatatransfer Config Salesforce /// /// ```csharp @@ -107,9 +179,7 @@ namespace Pulumi.Gcp.BigQuery /// { /// { "connector.authentication.oauth.clientId", "client-id" }, /// { "connector.authentication.oauth.clientSecret", "client-secret" }, - /// { "connector.authentication.username", "username" }, - /// { "connector.authentication.password", "password" }, - /// { "connector.authentication.securityToken", "security-token" }, + /// { "connector.authentication.oauth.myDomain", "MyDomainName" }, /// { "assets", "[\"asset-a\",\"asset-b\"]" }, /// }, /// }); @@ -174,6 +244,13 @@ public partial class DataTransferConfig : global::Pulumi.CustomResource [Output("emailPreferences")] public Output EmailPreferences { get; private set; } = null!; + /// + /// Represents the encryption configuration for a transfer. + /// Structure is documented below. 
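The generated CMEK example above creates the KMS key but grants no principal access to it. In practice the BigQuery service agent also needs to use the key. The sketch below is not part of the generated docs: it re-declares the key ring and crypto key so it is self-contained and grants `roles/cloudkms.cryptoKeyEncrypterDecrypter`. The service-agent address `bq-<project number>@bigquery-encryption.iam.gserviceaccount.com` is an assumption based on BigQuery's CMEK conventions; verify it for the Data Transfer Service before relying on it.

```csharp
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var project = Gcp.Organizations.GetProject.Invoke();

    var keyRing = new Gcp.Kms.KeyRing("key_ring", new()
    {
        Name = "example-keyring",
        Location = "us",
    });

    var cryptoKey = new Gcp.Kms.CryptoKey("crypto_key", new()
    {
        Name = "example-key",
        KeyRing = keyRing.Id,
    });

    // Assumption: the BigQuery service agent must be able to encrypt/decrypt with the key;
    // its address is typically bq-<project number>@bigquery-encryption.iam.gserviceaccount.com.
    var kmsPermission = new Gcp.Kms.CryptoKeyIAMMember("kms_permission", new()
    {
        CryptoKeyId = cryptoKey.Id,
        Role = "roles/cloudkms.cryptoKeyEncrypterDecrypter",
        Member = project.Apply(p =>
            $"serviceAccount:bq-{p.Number}@bigquery-encryption.iam.gserviceaccount.com"),
    });
});
```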
+ /// + [Output("encryptionConfiguration")] + public Output EncryptionConfiguration { get; private set; } = null!; + /// /// The geographic location where the transfer config should reside. /// Examples: US, EU, asia-northeast1. The default value is US. @@ -346,6 +423,13 @@ public sealed class DataTransferConfigArgs : global::Pulumi.ResourceArgs [Input("emailPreferences")] public Input? EmailPreferences { get; set; } + /// + /// Represents the encryption configuration for a transfer. + /// Structure is documented below. + /// + [Input("encryptionConfiguration")] + public Input? EncryptionConfiguration { get; set; } + /// /// The geographic location where the transfer config should reside. /// Examples: US, EU, asia-northeast1. The default value is US. @@ -476,6 +560,13 @@ public sealed class DataTransferConfigState : global::Pulumi.ResourceArgs [Input("emailPreferences")] public Input? EmailPreferences { get; set; } + /// + /// Represents the encryption configuration for a transfer. + /// Structure is documented below. + /// + [Input("encryptionConfiguration")] + public Input? EncryptionConfiguration { get; set; } + /// /// The geographic location where the transfer config should reside. /// Examples: US, EU, asia-northeast1. The default value is US. diff --git a/sdk/dotnet/BigQuery/Inputs/DataTransferConfigEncryptionConfigurationArgs.cs b/sdk/dotnet/BigQuery/Inputs/DataTransferConfigEncryptionConfigurationArgs.cs new file mode 100644 index 0000000000..8d552ae1bb --- /dev/null +++ b/sdk/dotnet/BigQuery/Inputs/DataTransferConfigEncryptionConfigurationArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQuery.Inputs +{ + + public sealed class DataTransferConfigEncryptionConfigurationArgs : global::Pulumi.ResourceArgs + { + /// + /// The name of the KMS key used for encrypting BigQuery data. + /// + [Input("kmsKeyName", required: true)] + public Input KmsKeyName { get; set; } = null!; + + public DataTransferConfigEncryptionConfigurationArgs() + { + } + public static new DataTransferConfigEncryptionConfigurationArgs Empty => new DataTransferConfigEncryptionConfigurationArgs(); + } +} diff --git a/sdk/dotnet/BigQuery/Inputs/DataTransferConfigEncryptionConfigurationGetArgs.cs b/sdk/dotnet/BigQuery/Inputs/DataTransferConfigEncryptionConfigurationGetArgs.cs new file mode 100644 index 0000000000..3ef8800568 --- /dev/null +++ b/sdk/dotnet/BigQuery/Inputs/DataTransferConfigEncryptionConfigurationGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQuery.Inputs +{ + + public sealed class DataTransferConfigEncryptionConfigurationGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The name of the KMS key used for encrypting BigQuery data. 
+ /// + [Input("kmsKeyName", required: true)] + public Input KmsKeyName { get; set; } = null!; + + public DataTransferConfigEncryptionConfigurationGetArgs() + { + } + public static new DataTransferConfigEncryptionConfigurationGetArgs Empty => new DataTransferConfigEncryptionConfigurationGetArgs(); + } +} diff --git a/sdk/dotnet/BigQuery/Outputs/DataTransferConfigEncryptionConfiguration.cs b/sdk/dotnet/BigQuery/Outputs/DataTransferConfigEncryptionConfiguration.cs new file mode 100644 index 0000000000..66b4e723a5 --- /dev/null +++ b/sdk/dotnet/BigQuery/Outputs/DataTransferConfigEncryptionConfiguration.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQuery.Outputs +{ + + [OutputType] + public sealed class DataTransferConfigEncryptionConfiguration + { + /// + /// The name of the KMS key used for encrypting BigQuery data. + /// + public readonly string KmsKeyName; + + [OutputConstructor] + private DataTransferConfigEncryptionConfiguration(string kmsKeyName) + { + KmsKeyName = kmsKeyName; + } + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/DataExchange.cs b/sdk/dotnet/BigQueryAnalyticsHub/DataExchange.cs index 392c877c91..f189006d5a 100644 --- a/sdk/dotnet/BigQueryAnalyticsHub/DataExchange.cs +++ b/sdk/dotnet/BigQueryAnalyticsHub/DataExchange.cs @@ -40,6 +40,30 @@ namespace Pulumi.Gcp.BigQueryAnalyticsHub /// /// }); /// ``` + /// ### Bigquery Analyticshub Data Exchange Dcr + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var dataExchange = new Gcp.BigQueryAnalyticsHub.DataExchange("data_exchange", new() + /// { + /// Location = "US", + /// DataExchangeId = "dcr_data_exchange", + /// DisplayName = "dcr_data_exchange", + /// Description = "example dcr data exchange", + /// SharingEnvironmentConfig = new Gcp.BigQueryAnalyticsHub.Inputs.DataExchangeSharingEnvironmentConfigArgs + /// { + /// DcrExchangeConfig = null, + /// }, + /// }); + /// + /// }); + /// ``` /// /// ## Import /// @@ -139,6 +163,14 @@ public partial class DataExchange : global::Pulumi.CustomResource [Output("project")] public Output Project { get; private set; } = null!; + /// + /// Configurable data sharing environment option for a data exchange. + /// This field is required for data clean room exchanges. + /// Structure is documented below. + /// + [Output("sharingEnvironmentConfig")] + public Output SharingEnvironmentConfig { get; private set; } = null!; + /// /// Create a DataExchange resource with the given unique name, arguments, and options. @@ -237,6 +269,14 @@ public sealed class DataExchangeArgs : global::Pulumi.ResourceArgs [Input("project")] public Input? Project { get; set; } + /// + /// Configurable data sharing environment option for a data exchange. + /// This field is required for data clean room exchanges. + /// Structure is documented below. + /// + [Input("sharingEnvironmentConfig")] + public Input? SharingEnvironmentConfig { get; set; } + public DataExchangeArgs() { } @@ -310,6 +350,14 @@ public sealed class DataExchangeState : global::Pulumi.ResourceArgs [Input("project")] public Input? 
Project { get; set; } + /// + /// Configurable data sharing environment option for a data exchange. + /// This field is required for data clean room exchanges. + /// Structure is documented below. + /// + [Input("sharingEnvironmentConfig")] + public Input? SharingEnvironmentConfig { get; set; } + public DataExchangeState() { } diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigArgs.cs new file mode 100644 index 0000000000..a71337be3a --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigArgs.cs @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Inputs +{ + + public sealed class DataExchangeSharingEnvironmentConfigArgs : global::Pulumi.ResourceArgs + { + /// + /// Data Clean Room (DCR), used for privacy-safe and secured data sharing. + /// + [Input("dcrExchangeConfig")] + public Input? DcrExchangeConfig { get; set; } + + /// + /// Default Analytics Hub data exchange, used for secured data sharing. + /// + [Input("defaultExchangeConfig")] + public Input? DefaultExchangeConfig { get; set; } + + public DataExchangeSharingEnvironmentConfigArgs() + { + } + public static new DataExchangeSharingEnvironmentConfigArgs Empty => new DataExchangeSharingEnvironmentConfigArgs(); + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.cs new file mode 100644 index 0000000000..64a70be96e --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Inputs +{ + + public sealed class DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs : global::Pulumi.ResourceArgs + { + public DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs() + { + } + public static new DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs Empty => new DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs(); + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigGetArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigGetArgs.cs new file mode 100644 index 0000000000..6d4a57c807 --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigGetArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Inputs +{ + + public sealed class DataExchangeSharingEnvironmentConfigDcrExchangeConfigGetArgs : global::Pulumi.ResourceArgs + { + public DataExchangeSharingEnvironmentConfigDcrExchangeConfigGetArgs() + { + } + public static new DataExchangeSharingEnvironmentConfigDcrExchangeConfigGetArgs Empty => new DataExchangeSharingEnvironmentConfigDcrExchangeConfigGetArgs(); + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs.cs new file mode 100644 index 0000000000..78b9132432 --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Inputs +{ + + public sealed class DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs : global::Pulumi.ResourceArgs + { + public DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs() + { + } + public static new DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs Empty => new DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs(); + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigGetArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigGetArgs.cs new file mode 100644 index 0000000000..2fefcb1146 --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigGetArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Inputs +{ + + public sealed class DataExchangeSharingEnvironmentConfigDefaultExchangeConfigGetArgs : global::Pulumi.ResourceArgs + { + public DataExchangeSharingEnvironmentConfigDefaultExchangeConfigGetArgs() + { + } + public static new DataExchangeSharingEnvironmentConfigDefaultExchangeConfigGetArgs Empty => new DataExchangeSharingEnvironmentConfigDefaultExchangeConfigGetArgs(); + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigGetArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigGetArgs.cs new file mode 100644 index 0000000000..2761dcb726 --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/DataExchangeSharingEnvironmentConfigGetArgs.cs @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Inputs +{ + + public sealed class DataExchangeSharingEnvironmentConfigGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Data Clean Room (DCR), used for privacy-safe and secured data sharing. + /// + [Input("dcrExchangeConfig")] + public Input? DcrExchangeConfig { get; set; } + + /// + /// Default Analytics Hub data exchange, used for secured data sharing. + /// + [Input("defaultExchangeConfig")] + public Input? DefaultExchangeConfig { get; set; } + + public DataExchangeSharingEnvironmentConfigGetArgs() + { + } + public static new DataExchangeSharingEnvironmentConfigGetArgs Empty => new DataExchangeSharingEnvironmentConfigGetArgs(); + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetArgs.cs index d5d60d30ef..7f6522a3f7 100644 --- a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetArgs.cs +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetArgs.cs @@ -14,12 +14,23 @@ public sealed class ListingBigqueryDatasetArgs : global::Pulumi.ResourceArgs { /// /// Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 - /// - /// - - - /// [Input("dataset", required: true)] public Input Dataset { get; set; } = null!; + [Input("selectedResources")] + private InputList? _selectedResources; + + /// + /// Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + /// Structure is documented below. + /// + public InputList SelectedResources + { + get => _selectedResources ?? (_selectedResources = new InputList()); + set => _selectedResources = value; + } + public ListingBigqueryDatasetArgs() { } diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetGetArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetGetArgs.cs index 70d52698f5..0ff266e286 100644 --- a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetGetArgs.cs +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetGetArgs.cs @@ -14,12 +14,23 @@ public sealed class ListingBigqueryDatasetGetArgs : global::Pulumi.ResourceArgs { /// /// Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 - /// - /// - - - /// [Input("dataset", required: true)] public Input Dataset { get; set; } = null!; + [Input("selectedResources")] + private InputList? _selectedResources; + + /// + /// Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + /// Structure is documented below. + /// + public InputList SelectedResources + { + get => _selectedResources ?? (_selectedResources = new InputList()); + set => _selectedResources = value; + } + public ListingBigqueryDatasetGetArgs() { } diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetSelectedResourceArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetSelectedResourceArgs.cs new file mode 100644 index 0000000000..7846de34ab --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetSelectedResourceArgs.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Inputs +{ + + public sealed class ListingBigqueryDatasetSelectedResourceArgs : global::Pulumi.ResourceArgs + { + /// + /// Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + /// + /// - - - + /// + [Input("table")] + public Input? Table { get; set; } + + public ListingBigqueryDatasetSelectedResourceArgs() + { + } + public static new ListingBigqueryDatasetSelectedResourceArgs Empty => new ListingBigqueryDatasetSelectedResourceArgs(); + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetSelectedResourceGetArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetSelectedResourceGetArgs.cs new file mode 100644 index 0000000000..36a6029c31 --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingBigqueryDatasetSelectedResourceGetArgs.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Inputs +{ + + public sealed class ListingBigqueryDatasetSelectedResourceGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + /// + /// - - - + /// + [Input("table")] + public Input? Table { get; set; } + + public ListingBigqueryDatasetSelectedResourceGetArgs() + { + } + public static new ListingBigqueryDatasetSelectedResourceGetArgs Empty => new ListingBigqueryDatasetSelectedResourceGetArgs(); + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingRestrictedExportConfigArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingRestrictedExportConfigArgs.cs index 96fa8e8f21..a105d44bde 100644 --- a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingRestrictedExportConfigArgs.cs +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingRestrictedExportConfigArgs.cs @@ -18,6 +18,13 @@ public sealed class ListingRestrictedExportConfigArgs : global::Pulumi.ResourceA [Input("enabled")] public Input? Enabled { get; set; } + /// + /// (Output) + /// If true, restrict direct table access(read api/tabledata.list) on linked table. + /// + [Input("restrictDirectTableAccess")] + public Input? RestrictDirectTableAccess { get; set; } + /// /// If true, restrict export of query result derived from restricted linked dataset table. /// diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingRestrictedExportConfigGetArgs.cs b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingRestrictedExportConfigGetArgs.cs index dcbde38e40..261896e211 100644 --- a/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingRestrictedExportConfigGetArgs.cs +++ b/sdk/dotnet/BigQueryAnalyticsHub/Inputs/ListingRestrictedExportConfigGetArgs.cs @@ -18,6 +18,13 @@ public sealed class ListingRestrictedExportConfigGetArgs : global::Pulumi.Resour [Input("enabled")] public Input? Enabled { get; set; } + /// + /// (Output) + /// If true, restrict direct table access(read api/tabledata.list) on linked table. 
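The new `restrictDirectTableAccess` field is output-only, while `enabled` and `restrictQueryResult` remain configurable. Below is a short sketch of a `Listing` that turns on restricted export and reads the output back; the exchange, dataset, and listing IDs are placeholders, and the `RestrictedExportConfig` output property name is assumed to match the args shown in this hunk.

```csharp
using System.Collections.Generic;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var exchange = new Gcp.BigQueryAnalyticsHub.DataExchange("exchange", new()
    {
        Location = "US",
        DataExchangeId = "restricted_exchange",
        DisplayName = "restricted_exchange",
    });

    var dataset = new Gcp.BigQuery.Dataset("dataset", new()
    {
        DatasetId = "restricted_listing",
        Location = "US",
    });

    var listing = new Gcp.BigQueryAnalyticsHub.Listing("listing", new()
    {
        Location = "US",
        DataExchangeId = exchange.DataExchangeId,
        ListingId = "restricted_listing",
        DisplayName = "restricted_listing",
        BigqueryDataset = new Gcp.BigQueryAnalyticsHub.Inputs.ListingBigqueryDatasetArgs
        {
            Dataset = dataset.Id,
        },
        RestrictedExportConfig = new Gcp.BigQueryAnalyticsHub.Inputs.ListingRestrictedExportConfigArgs
        {
            Enabled = true,
            // Also block export of query results derived from the linked dataset.
            RestrictQueryResult = true,
        },
    });

    return new Dictionary<string, object?>
    {
        // Output-only: whether direct table reads on the linked dataset are blocked.
        ["directTableAccessRestricted"] = listing.RestrictedExportConfig
            .Apply(c => c?.RestrictDirectTableAccess),
    };
});
```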
+ /// + [Input("restrictDirectTableAccess")] + public Input? RestrictDirectTableAccess { get; set; } + /// /// If true, restrict export of query result derived from restricted linked dataset table. /// diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Listing.cs b/sdk/dotnet/BigQueryAnalyticsHub/Listing.cs index e30d4ba2b8..8992a22c6a 100644 --- a/sdk/dotnet/BigQueryAnalyticsHub/Listing.cs +++ b/sdk/dotnet/BigQueryAnalyticsHub/Listing.cs @@ -107,6 +107,87 @@ namespace Pulumi.Gcp.BigQueryAnalyticsHub /// /// }); /// ``` + /// ### Bigquery Analyticshub Listing Dcr + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var listing = new Gcp.BigQueryAnalyticsHub.DataExchange("listing", new() + /// { + /// Location = "US", + /// DataExchangeId = "dcr_data_exchange", + /// DisplayName = "dcr_data_exchange", + /// Description = "example dcr data exchange", + /// SharingEnvironmentConfig = new Gcp.BigQueryAnalyticsHub.Inputs.DataExchangeSharingEnvironmentConfigArgs + /// { + /// DcrExchangeConfig = null, + /// }, + /// }); + /// + /// var listingDataset = new Gcp.BigQuery.Dataset("listing", new() + /// { + /// DatasetId = "dcr_listing", + /// FriendlyName = "dcr_listing", + /// Description = "example dcr data exchange", + /// Location = "US", + /// }); + /// + /// var listingTable = new Gcp.BigQuery.Table("listing", new() + /// { + /// DeletionProtection = false, + /// TableId = "dcr_listing", + /// DatasetId = listingDataset.DatasetId, + /// Schema = @"[ + /// { + /// ""name"": ""name"", + /// ""type"": ""STRING"", + /// ""mode"": ""NULLABLE"" + /// }, + /// { + /// ""name"": ""post_abbr"", + /// ""type"": ""STRING"", + /// ""mode"": ""NULLABLE"" + /// }, + /// { + /// ""name"": ""date"", + /// ""type"": ""DATE"", + /// ""mode"": ""NULLABLE"" + /// } + /// ] + /// ", + /// }); + /// + /// var listingListing = new Gcp.BigQueryAnalyticsHub.Listing("listing", new() + /// { + /// Location = "US", + /// DataExchangeId = listing.DataExchangeId, + /// ListingId = "dcr_listing", + /// DisplayName = "dcr_listing", + /// Description = "example dcr data exchange", + /// BigqueryDataset = new Gcp.BigQueryAnalyticsHub.Inputs.ListingBigqueryDatasetArgs + /// { + /// Dataset = listingDataset.Id, + /// SelectedResources = new[] + /// { + /// new Gcp.BigQueryAnalyticsHub.Inputs.ListingBigqueryDatasetSelectedResourceArgs + /// { + /// Table = listingTable.Id, + /// }, + /// }, + /// }, + /// RestrictedExportConfig = new Gcp.BigQueryAnalyticsHub.Inputs.ListingRestrictedExportConfigArgs + /// { + /// Enabled = true, + /// }, + /// }); + /// + /// }); + /// ``` /// /// ## Import /// diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfig.cs b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfig.cs new file mode 100644 index 0000000000..9be829efa0 --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfig.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Outputs +{ + + [OutputType] + public sealed class DataExchangeSharingEnvironmentConfig + { + /// + /// Data Clean Room (DCR), used for privacy-safe and secured data sharing. + /// + public readonly Outputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfig? DcrExchangeConfig; + /// + /// Default Analytics Hub data exchange, used for secured data sharing. + /// + public readonly Outputs.DataExchangeSharingEnvironmentConfigDefaultExchangeConfig? DefaultExchangeConfig; + + [OutputConstructor] + private DataExchangeSharingEnvironmentConfig( + Outputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfig? dcrExchangeConfig, + + Outputs.DataExchangeSharingEnvironmentConfigDefaultExchangeConfig? defaultExchangeConfig) + { + DcrExchangeConfig = dcrExchangeConfig; + DefaultExchangeConfig = defaultExchangeConfig; + } + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfig.cs b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfig.cs new file mode 100644 index 0000000000..389122c184 --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfig.cs @@ -0,0 +1,21 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Outputs +{ + + [OutputType] + public sealed class DataExchangeSharingEnvironmentConfigDcrExchangeConfig + { + [OutputConstructor] + private DataExchangeSharingEnvironmentConfigDcrExchangeConfig() + { + } + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig.cs b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig.cs new file mode 100644 index 0000000000..15763ca0eb --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig.cs @@ -0,0 +1,21 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Outputs +{ + + [OutputType] + public sealed class DataExchangeSharingEnvironmentConfigDefaultExchangeConfig + { + [OutputConstructor] + private DataExchangeSharingEnvironmentConfigDefaultExchangeConfig() + { + } + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingBigqueryDataset.cs b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingBigqueryDataset.cs index 2a3fce79cf..94fc25d7db 100644 --- a/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingBigqueryDataset.cs +++ b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingBigqueryDataset.cs @@ -15,15 +15,22 @@ public sealed class ListingBigqueryDataset { /// /// Resource name of the dataset source for this listing. e.g. 
projects/myproject/datasets/123 - /// - /// - - - /// public readonly string Dataset; + /// + /// Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + /// Structure is documented below. + /// + public readonly ImmutableArray SelectedResources; [OutputConstructor] - private ListingBigqueryDataset(string dataset) + private ListingBigqueryDataset( + string dataset, + + ImmutableArray selectedResources) { Dataset = dataset; + SelectedResources = selectedResources; } } } diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingBigqueryDatasetSelectedResource.cs b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingBigqueryDatasetSelectedResource.cs new file mode 100644 index 0000000000..d15284103d --- /dev/null +++ b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingBigqueryDatasetSelectedResource.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.BigQueryAnalyticsHub.Outputs +{ + + [OutputType] + public sealed class ListingBigqueryDatasetSelectedResource + { + /// + /// Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + /// + /// - - - + /// + public readonly string? Table; + + [OutputConstructor] + private ListingBigqueryDatasetSelectedResource(string? table) + { + Table = table; + } + } +} diff --git a/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingRestrictedExportConfig.cs b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingRestrictedExportConfig.cs index df08e85f2e..789f13047a 100644 --- a/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingRestrictedExportConfig.cs +++ b/sdk/dotnet/BigQueryAnalyticsHub/Outputs/ListingRestrictedExportConfig.cs @@ -18,6 +18,11 @@ public sealed class ListingRestrictedExportConfig /// public readonly bool? Enabled; /// + /// (Output) + /// If true, restrict direct table access(read api/tabledata.list) on linked table. + /// + public readonly bool? RestrictDirectTableAccess; + /// /// If true, restrict export of query result derived from restricted linked dataset table. /// public readonly bool? RestrictQueryResult; @@ -26,9 +31,12 @@ public sealed class ListingRestrictedExportConfig private ListingRestrictedExportConfig( bool? enabled, + bool? restrictDirectTableAccess, + bool? restrictQueryResult) { Enabled = enabled; + RestrictDirectTableAccess = restrictDirectTableAccess; RestrictQueryResult = restrictQueryResult; } } diff --git a/sdk/dotnet/BigTable/Inputs/TableColumnFamilyArgs.cs b/sdk/dotnet/BigTable/Inputs/TableColumnFamilyArgs.cs index b507e7bddf..eae76f550b 100644 --- a/sdk/dotnet/BigTable/Inputs/TableColumnFamilyArgs.cs +++ b/sdk/dotnet/BigTable/Inputs/TableColumnFamilyArgs.cs @@ -18,6 +18,12 @@ public sealed class TableColumnFamilyArgs : global::Pulumi.ResourceArgs [Input("family", required: true)] public Input Family { get; set; } = null!; + /// + /// The type of the column family. + /// + [Input("type")] + public Input? 
Type { get; set; } + public TableColumnFamilyArgs() { } diff --git a/sdk/dotnet/BigTable/Inputs/TableColumnFamilyGetArgs.cs b/sdk/dotnet/BigTable/Inputs/TableColumnFamilyGetArgs.cs index c48eab738d..96979a0c2c 100644 --- a/sdk/dotnet/BigTable/Inputs/TableColumnFamilyGetArgs.cs +++ b/sdk/dotnet/BigTable/Inputs/TableColumnFamilyGetArgs.cs @@ -18,6 +18,12 @@ public sealed class TableColumnFamilyGetArgs : global::Pulumi.ResourceArgs [Input("family", required: true)] public Input Family { get; set; } = null!; + /// + /// The type of the column family. + /// + [Input("type")] + public Input? Type { get; set; } + public TableColumnFamilyGetArgs() { } diff --git a/sdk/dotnet/BigTable/Outputs/TableColumnFamily.cs b/sdk/dotnet/BigTable/Outputs/TableColumnFamily.cs index b7a4f8ec4a..b79ffdc70a 100644 --- a/sdk/dotnet/BigTable/Outputs/TableColumnFamily.cs +++ b/sdk/dotnet/BigTable/Outputs/TableColumnFamily.cs @@ -17,11 +17,19 @@ public sealed class TableColumnFamily /// The name of the column family. /// public readonly string Family; + /// + /// The type of the column family. + /// + public readonly string? Type; [OutputConstructor] - private TableColumnFamily(string family) + private TableColumnFamily( + string family, + + string? type) { Family = family; + Type = type; } } } diff --git a/sdk/dotnet/BigTable/Table.cs b/sdk/dotnet/BigTable/Table.cs index d95886182a..285b91c4c6 100644 --- a/sdk/dotnet/BigTable/Table.cs +++ b/sdk/dotnet/BigTable/Table.cs @@ -58,6 +58,24 @@ namespace Pulumi.Gcp.BigTable /// new Gcp.BigTable.Inputs.TableColumnFamilyArgs /// { /// Family = "family-second", + /// Type = "intsum", + /// }, + /// new Gcp.BigTable.Inputs.TableColumnFamilyArgs + /// { + /// Family = "family-third", + /// Type = @" { + /// ""aggregateType"": { + /// ""max"": {}, + /// ""inputType"": { + /// ""int64Type"": { + /// ""encoding"": { + /// ""bigEndianBytes"": {} + /// } + /// } + /// } + /// } + /// } + /// ", /// }, /// }, /// ChangeStreamRetention = "24h0m0s", diff --git a/sdk/dotnet/CertificateAuthority/Authority.cs b/sdk/dotnet/CertificateAuthority/Authority.cs index ce26385bf0..7ff9ad9d8a 100644 --- a/sdk/dotnet/CertificateAuthority/Authority.cs +++ b/sdk/dotnet/CertificateAuthority/Authority.cs @@ -481,7 +481,8 @@ public partial class Authority : global::Pulumi.CustomResource public Output DeletionProtection { get; private set; } = null!; /// - /// Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + /// Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + /// ENABLED, DISABLED, STAGED. /// [Output("desiredState")] public Output DesiredState { get; private set; } = null!; @@ -682,7 +683,8 @@ public sealed class AuthorityArgs : global::Pulumi.ResourceArgs public Input? DeletionProtection { get; set; } /// - /// Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + /// Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + /// ENABLED, DISABLED, STAGED. /// [Input("desiredState")] public Input? DesiredState { get; set; } @@ -824,7 +826,8 @@ public InputList AccessUrls public Input? DeletionProtection { get; set; } /// - /// Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + /// Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. 
Possible values: + /// ENABLED, DISABLED, STAGED. /// [Input("desiredState")] public Input? DesiredState { get; set; } diff --git a/sdk/dotnet/CertificateManager/Certificate.cs b/sdk/dotnet/CertificateManager/Certificate.cs index 346ba0206a..595bfdec7f 100644 --- a/sdk/dotnet/CertificateManager/Certificate.cs +++ b/sdk/dotnet/CertificateManager/Certificate.cs @@ -529,6 +529,12 @@ public partial class Certificate : global::Pulumi.CustomResource [Output("pulumiLabels")] public Output> PulumiLabels { get; private set; } = null!; + /// + /// The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + /// + [Output("sanDnsnames")] + public Output> SanDnsnames { get; private set; } = null!; + /// /// The scope of the certificate. /// DEFAULT: Certificates with default scope are served from core Google data centers. @@ -769,6 +775,18 @@ public InputMap PulumiLabels } } + [Input("sanDnsnames")] + private InputList? _sanDnsnames; + + /// + /// The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + /// + public InputList SanDnsnames + { + get => _sanDnsnames ?? (_sanDnsnames = new InputList()); + set => _sanDnsnames = value; + } + /// /// The scope of the certificate. /// DEFAULT: Certificates with default scope are served from core Google data centers. diff --git a/sdk/dotnet/CertificateManager/GetCertificates.cs b/sdk/dotnet/CertificateManager/GetCertificates.cs new file mode 100644 index 0000000000..0eea79a5a7 --- /dev/null +++ b/sdk/dotnet/CertificateManager/GetCertificates.cs @@ -0,0 +1,162 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CertificateManager +{ + public static class GetCertificates + { + /// + /// List all certificates within Google Certificate Manager for a given project, region or filter. + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @default = Gcp.CertificateManager.GetCertificates.Invoke(); + /// + /// }); + /// ``` + /// + /// ### With A Filter + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @default = Gcp.CertificateManager.GetCertificates.Invoke(new() + /// { + /// Filter = "name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*", + /// }); + /// + /// }); + /// ``` + /// + public static Task InvokeAsync(GetCertificatesArgs? args = null, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.InvokeAsync("gcp:certificatemanager/getCertificates:getCertificates", args ?? new GetCertificatesArgs(), options.WithDefaults()); + + /// + /// List all certificates within Google Certificate Manager for a given project, region or filter. 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @default = Gcp.CertificateManager.GetCertificates.Invoke(); + /// + /// }); + /// ``` + /// + /// ### With A Filter + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @default = Gcp.CertificateManager.GetCertificates.Invoke(new() + /// { + /// Filter = "name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*", + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetCertificatesInvokeArgs? args = null, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.Invoke("gcp:certificatemanager/getCertificates:getCertificates", args ?? new GetCertificatesInvokeArgs(), options.WithDefaults()); + } + + + public sealed class GetCertificatesArgs : global::Pulumi.InvokeArgs + { + /// + /// Filter expression to restrict the certificates returned. + /// + [Input("filter")] + public string? Filter { get; set; } + + /// + /// The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + /// + [Input("region")] + public string? Region { get; set; } + + public GetCertificatesArgs() + { + } + public static new GetCertificatesArgs Empty => new GetCertificatesArgs(); + } + + public sealed class GetCertificatesInvokeArgs : global::Pulumi.InvokeArgs + { + /// + /// Filter expression to restrict the certificates returned. + /// + [Input("filter")] + public Input? Filter { get; set; } + + /// + /// The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + /// + [Input("region")] + public Input? Region { get; set; } + + public GetCertificatesInvokeArgs() + { + } + public static new GetCertificatesInvokeArgs Empty => new GetCertificatesInvokeArgs(); + } + + + [OutputType] + public sealed class GetCertificatesResult + { + public readonly ImmutableArray Certificates; + public readonly string? Filter; + /// + /// The provider-assigned unique ID for this managed resource. + /// + public readonly string Id; + public readonly string? Region; + + [OutputConstructor] + private GetCertificatesResult( + ImmutableArray certificates, + + string? filter, + + string id, + + string? region) + { + Certificates = certificates; + Filter = filter; + Id = id; + Region = region; + } + } +} diff --git a/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedAuthorizationAttemptInfoResult.cs b/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedAuthorizationAttemptInfoResult.cs new file mode 100644 index 0000000000..9d3d5e9be4 --- /dev/null +++ b/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedAuthorizationAttemptInfoResult.cs @@ -0,0 +1,51 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CertificateManager.Outputs +{ + + [OutputType] + public sealed class GetCertificatesCertificateManagedAuthorizationAttemptInfoResult + { + /// + /// Human readable explanation for reaching the state. 
Provided to help + /// address the configuration issues. + /// Not guaranteed to be stable. For programmatic access use 'failure_reason' field. + /// + public readonly string Details; + /// + /// Domain name of the authorization attempt. + /// + public readonly string Domain; + /// + /// Reason for failure of the authorization attempt for the domain. + /// + public readonly string FailureReason; + /// + /// State of the domain for managed certificate issuance. + /// + public readonly string State; + + [OutputConstructor] + private GetCertificatesCertificateManagedAuthorizationAttemptInfoResult( + string details, + + string domain, + + string failureReason, + + string state) + { + Details = details; + Domain = domain; + FailureReason = failureReason; + State = state; + } + } +} diff --git a/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedProvisioningIssueResult.cs b/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedProvisioningIssueResult.cs new file mode 100644 index 0000000000..efd300339d --- /dev/null +++ b/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedProvisioningIssueResult.cs @@ -0,0 +1,37 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CertificateManager.Outputs +{ + + [OutputType] + public sealed class GetCertificatesCertificateManagedProvisioningIssueResult + { + /// + /// Human readable explanation about the issue. Provided to help address + /// the configuration issues. + /// Not guaranteed to be stable. For programmatic access use 'reason' field. + /// + public readonly string Details; + /// + /// Reason for provisioning failures. + /// + public readonly string Reason; + + [OutputConstructor] + private GetCertificatesCertificateManagedProvisioningIssueResult( + string details, + + string reason) + { + Details = details; + Reason = reason; + } + } +} diff --git a/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedResult.cs b/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedResult.cs new file mode 100644 index 0000000000..2012b9ab7b --- /dev/null +++ b/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateManagedResult.cs @@ -0,0 +1,67 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CertificateManager.Outputs +{ + + [OutputType] + public sealed class GetCertificatesCertificateManagedResult + { + /// + /// Detailed state of the latest authorization attempt for each domain + /// specified for this Managed Certificate. + /// + public readonly ImmutableArray AuthorizationAttemptInfos; + /// + /// Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + /// + public readonly ImmutableArray DnsAuthorizations; + /// + /// The domains for which a managed SSL certificate will be generated. 
+ /// Wildcard domains are only supported with DNS challenge resolution + /// + public readonly ImmutableArray Domains; + /// + /// The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. + /// If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. + /// Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + /// + public readonly string IssuanceConfig; + /// + /// Information about issues with provisioning this Managed Certificate. + /// + public readonly ImmutableArray ProvisioningIssues; + /// + /// A state of this Managed Certificate. + /// + public readonly string State; + + [OutputConstructor] + private GetCertificatesCertificateManagedResult( + ImmutableArray authorizationAttemptInfos, + + ImmutableArray dnsAuthorizations, + + ImmutableArray domains, + + string issuanceConfig, + + ImmutableArray provisioningIssues, + + string state) + { + AuthorizationAttemptInfos = authorizationAttemptInfos; + DnsAuthorizations = dnsAuthorizations; + Domains = domains; + IssuanceConfig = issuanceConfig; + ProvisioningIssues = provisioningIssues; + State = state; + } + } +} diff --git a/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateResult.cs b/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateResult.cs new file mode 100644 index 0000000000..1306938534 --- /dev/null +++ b/sdk/dotnet/CertificateManager/Outputs/GetCertificatesCertificateResult.cs @@ -0,0 +1,106 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CertificateManager.Outputs +{ + + [OutputType] + public sealed class GetCertificatesCertificateResult + { + /// + /// A human-readable description of the resource. + /// + public readonly string Description; + public readonly ImmutableDictionary EffectiveLabels; + /// + /// Set of label tags associated with the Certificate resource. + /// + /// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + /// Please refer to the field 'effective_labels' for all of the labels present on the resource. + /// + public readonly ImmutableDictionary Labels; + /// + /// The Certificate Manager location. If not specified, "global" is used. + /// + public readonly string Location; + /// + /// Configuration and state of a Managed Certificate. + /// Certificate Manager provisions and renews Managed Certificates + /// automatically, for as long as it's authorized to do so. + /// + public readonly ImmutableArray Manageds; + /// + /// A user-defined name of the certificate. Certificate names must be unique + /// The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, + /// and all following characters must be a dash, underscore, letter or digit. + /// + public readonly string Name; + /// + /// The ID of the project in which the resource belongs. If it + /// is not provided, the provider project is used. 
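The `GetCertificatesCertificateResult` fields above (`name`, `sanDnsnames`, and so on) can be consumed like any other invoke result. A small sketch that lists certificates matching a filter and exports their names and SAN DNS names; the filter string is a placeholder.

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var certs = Gcp.CertificateManager.GetCertificates.Invoke(new()
    {
        // Placeholder filter; substitute your own project/region values.
        Filter = "name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*",
    });

    return new Dictionary<string, object?>
    {
        // Each entry exposes the fields of GetCertificatesCertificateResult shown above.
        ["certificateNames"] = certs.Apply(r => r.Certificates.Select(c => c.Name).ToList()),
        ["sanDnsNames"] = certs.Apply(r => r.Certificates.SelectMany(c => c.SanDnsnames).ToList()),
    };
});
```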
+ /// + public readonly string Project; + /// + /// The combination of labels configured directly on the resource + /// and default labels configured on the provider. + /// + public readonly ImmutableDictionary PulumiLabels; + /// + /// The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + /// + public readonly ImmutableArray SanDnsnames; + /// + /// The scope of the certificate. + /// + /// DEFAULT: Certificates with default scope are served from core Google data centers. + /// If unsure, choose this option. + /// + /// EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. + /// See https://cloud.google.com/vpc/docs/edge-locations. + /// + /// ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). + /// See https://cloud.google.com/compute/docs/regions-zones + /// + public readonly string Scope; + + [OutputConstructor] + private GetCertificatesCertificateResult( + string description, + + ImmutableDictionary effectiveLabels, + + ImmutableDictionary labels, + + string location, + + ImmutableArray manageds, + + string name, + + string project, + + ImmutableDictionary pulumiLabels, + + ImmutableArray sanDnsnames, + + string scope) + { + Description = description; + EffectiveLabels = effectiveLabels; + Labels = labels; + Location = location; + Manageds = manageds; + Name = name; + Project = project; + PulumiLabels = pulumiLabels; + SanDnsnames = sanDnsnames; + Scope = scope; + } + } +} diff --git a/sdk/dotnet/CloudBuild/Inputs/WorkerPoolWorkerConfigArgs.cs b/sdk/dotnet/CloudBuild/Inputs/WorkerPoolWorkerConfigArgs.cs index 4ae9dd08f0..3b3b2a7cf2 100644 --- a/sdk/dotnet/CloudBuild/Inputs/WorkerPoolWorkerConfigArgs.cs +++ b/sdk/dotnet/CloudBuild/Inputs/WorkerPoolWorkerConfigArgs.cs @@ -13,13 +13,13 @@ namespace Pulumi.Gcp.CloudBuild.Inputs public sealed class WorkerPoolWorkerConfigArgs : global::Pulumi.ResourceArgs { /// - /// Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + /// Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. /// [Input("diskSizeGb")] public Input? DiskSizeGb { get; set; } /// - /// Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + /// Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. /// [Input("machineType")] public Input? 
MachineType { get; set; } diff --git a/sdk/dotnet/CloudBuild/Inputs/WorkerPoolWorkerConfigGetArgs.cs b/sdk/dotnet/CloudBuild/Inputs/WorkerPoolWorkerConfigGetArgs.cs index 890a1b0758..95e01761b6 100644 --- a/sdk/dotnet/CloudBuild/Inputs/WorkerPoolWorkerConfigGetArgs.cs +++ b/sdk/dotnet/CloudBuild/Inputs/WorkerPoolWorkerConfigGetArgs.cs @@ -13,13 +13,13 @@ namespace Pulumi.Gcp.CloudBuild.Inputs public sealed class WorkerPoolWorkerConfigGetArgs : global::Pulumi.ResourceArgs { /// - /// Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + /// Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. /// [Input("diskSizeGb")] public Input? DiskSizeGb { get; set; } /// - /// Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + /// Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. /// [Input("machineType")] public Input? MachineType { get; set; } diff --git a/sdk/dotnet/CloudBuild/Outputs/WorkerPoolWorkerConfig.cs b/sdk/dotnet/CloudBuild/Outputs/WorkerPoolWorkerConfig.cs index de5709df2e..884d96f372 100644 --- a/sdk/dotnet/CloudBuild/Outputs/WorkerPoolWorkerConfig.cs +++ b/sdk/dotnet/CloudBuild/Outputs/WorkerPoolWorkerConfig.cs @@ -14,11 +14,11 @@ namespace Pulumi.Gcp.CloudBuild.Outputs public sealed class WorkerPoolWorkerConfig { /// - /// Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + /// Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. /// public readonly int? DiskSizeGb; /// - /// Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + /// Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. /// public readonly string? MachineType; /// diff --git a/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeArgs.cs b/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeArgs.cs index 0efe4d6838..f65e5fd465 100644 --- a/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeArgs.cs +++ b/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeArgs.cs @@ -34,8 +34,7 @@ public sealed class ServiceTemplateSpecVolumeArgs : global::Pulumi.ResourceArgs /// /// A filesystem backed by a Network File System share. 
This filesystem requires the - /// run.googleapis.com/execution-environment annotation to be set to "gen2" and - /// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + /// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" /// Structure is documented below. /// [Input("nfs")] diff --git a/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeCsiArgs.cs b/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeCsiArgs.cs index 8ef96163a5..e3c6c4b7e0 100644 --- a/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeCsiArgs.cs +++ b/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeCsiArgs.cs @@ -15,8 +15,7 @@ public sealed class ServiceTemplateSpecVolumeCsiArgs : global::Pulumi.ResourceAr /// /// Unique name representing the type of file system to be created. Cloud Run supports the following values: /// * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - /// run.googleapis.com/execution-environment annotation to be set to "gen2" and - /// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + /// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" /// [Input("driver", required: true)] public Input Driver { get; set; } = null!; diff --git a/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeCsiGetArgs.cs b/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeCsiGetArgs.cs index 0bd2bc5ec7..2c872375b3 100644 --- a/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeCsiGetArgs.cs +++ b/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeCsiGetArgs.cs @@ -15,8 +15,7 @@ public sealed class ServiceTemplateSpecVolumeCsiGetArgs : global::Pulumi.Resourc /// /// Unique name representing the type of file system to be created. Cloud Run supports the following values: /// * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - /// run.googleapis.com/execution-environment annotation to be set to "gen2" and - /// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + /// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" /// [Input("driver", required: true)] public Input Driver { get; set; } = null!; diff --git a/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeGetArgs.cs b/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeGetArgs.cs index 7f326a66e9..11ec962010 100644 --- a/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeGetArgs.cs +++ b/sdk/dotnet/CloudRun/Inputs/ServiceTemplateSpecVolumeGetArgs.cs @@ -34,8 +34,7 @@ public sealed class ServiceTemplateSpecVolumeGetArgs : global::Pulumi.ResourceAr /// /// A filesystem backed by a Network File System share. This filesystem requires the - /// run.googleapis.com/execution-environment annotation to be set to "gen2" and - /// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + /// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" /// Structure is documented below. /// [Input("nfs")] diff --git a/sdk/dotnet/CloudRun/Outputs/GetServiceTemplateSpecVolumeCsiResult.cs b/sdk/dotnet/CloudRun/Outputs/GetServiceTemplateSpecVolumeCsiResult.cs index 6f44cfe50e..6bd518e426 100644 --- a/sdk/dotnet/CloudRun/Outputs/GetServiceTemplateSpecVolumeCsiResult.cs +++ b/sdk/dotnet/CloudRun/Outputs/GetServiceTemplateSpecVolumeCsiResult.cs @@ -16,8 +16,7 @@ public sealed class GetServiceTemplateSpecVolumeCsiResult /// /// Unique name representing the type of file system to be created. 
Cloud Run supports the following values: /// * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - /// run.googleapis.com/execution-environment annotation to be set to "gen2" and - /// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + /// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" /// public readonly string Driver; /// diff --git a/sdk/dotnet/CloudRun/Outputs/GetServiceTemplateSpecVolumeResult.cs b/sdk/dotnet/CloudRun/Outputs/GetServiceTemplateSpecVolumeResult.cs index 7e0529b4f8..dcebb0d74d 100644 --- a/sdk/dotnet/CloudRun/Outputs/GetServiceTemplateSpecVolumeResult.cs +++ b/sdk/dotnet/CloudRun/Outputs/GetServiceTemplateSpecVolumeResult.cs @@ -27,8 +27,7 @@ public sealed class GetServiceTemplateSpecVolumeResult public readonly string Name; /// /// A filesystem backed by a Network File System share. This filesystem requires the - /// run.googleapis.com/execution-environment annotation to be set to "gen2" and - /// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + /// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" /// public readonly ImmutableArray Nfs; /// diff --git a/sdk/dotnet/CloudRun/Outputs/ServiceTemplateSpecVolume.cs b/sdk/dotnet/CloudRun/Outputs/ServiceTemplateSpecVolume.cs index 0ed6a0de73..b41cbdfba9 100644 --- a/sdk/dotnet/CloudRun/Outputs/ServiceTemplateSpecVolume.cs +++ b/sdk/dotnet/CloudRun/Outputs/ServiceTemplateSpecVolume.cs @@ -29,8 +29,7 @@ public sealed class ServiceTemplateSpecVolume public readonly string Name; /// /// A filesystem backed by a Network File System share. This filesystem requires the - /// run.googleapis.com/execution-environment annotation to be set to "gen2" and - /// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + /// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" /// Structure is documented below. /// public readonly Outputs.ServiceTemplateSpecVolumeNfs? Nfs; diff --git a/sdk/dotnet/CloudRun/Outputs/ServiceTemplateSpecVolumeCsi.cs b/sdk/dotnet/CloudRun/Outputs/ServiceTemplateSpecVolumeCsi.cs index 1e052f1822..a5ef81424d 100644 --- a/sdk/dotnet/CloudRun/Outputs/ServiceTemplateSpecVolumeCsi.cs +++ b/sdk/dotnet/CloudRun/Outputs/ServiceTemplateSpecVolumeCsi.cs @@ -16,8 +16,7 @@ public sealed class ServiceTemplateSpecVolumeCsi /// /// Unique name representing the type of file system to be created. Cloud Run supports the following values: /// * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - /// run.googleapis.com/execution-environment annotation to be set to "gen2" and - /// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + /// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" /// public readonly string Driver; /// diff --git a/sdk/dotnet/CloudRunV2/Inputs/JobTemplateTemplateVolumeArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/JobTemplateTemplateVolumeArgs.cs index 1420961ce4..a21ed07287 100644 --- a/sdk/dotnet/CloudRunV2/Inputs/JobTemplateTemplateVolumeArgs.cs +++ b/sdk/dotnet/CloudRunV2/Inputs/JobTemplateTemplateVolumeArgs.cs @@ -27,7 +27,7 @@ public sealed class JobTemplateTemplateVolumeArgs : global::Pulumi.ResourceArgs public Input? EmptyDir { get; set; } /// - /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + /// Cloud Storage bucket mounted as a volume using GCSFuse. 
/// Structure is documented below. /// [Input("gcs")] @@ -40,7 +40,7 @@ public sealed class JobTemplateTemplateVolumeArgs : global::Pulumi.ResourceArgs public Input Name { get; set; } = null!; /// - /// NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + /// NFS share mounted as a volume. /// Structure is documented below. /// [Input("nfs")] diff --git a/sdk/dotnet/CloudRunV2/Inputs/JobTemplateTemplateVolumeGetArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/JobTemplateTemplateVolumeGetArgs.cs index 3d630d9da1..594f761684 100644 --- a/sdk/dotnet/CloudRunV2/Inputs/JobTemplateTemplateVolumeGetArgs.cs +++ b/sdk/dotnet/CloudRunV2/Inputs/JobTemplateTemplateVolumeGetArgs.cs @@ -27,7 +27,7 @@ public sealed class JobTemplateTemplateVolumeGetArgs : global::Pulumi.ResourceAr public Input? EmptyDir { get; set; } /// - /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + /// Cloud Storage bucket mounted as a volume using GCSFuse. /// Structure is documented below. /// [Input("gcs")] @@ -40,7 +40,7 @@ public sealed class JobTemplateTemplateVolumeGetArgs : global::Pulumi.ResourceAr public Input Name { get; set; } = null!; /// - /// NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + /// NFS share mounted as a volume. /// Structure is documented below. /// [Input("nfs")] diff --git a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateArgs.cs index e6f6a214a2..98bf252b43 100644 --- a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateArgs.cs +++ b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateArgs.cs @@ -94,6 +94,13 @@ public InputMap Labels [Input("serviceAccount")] public Input? ServiceAccount { get; set; } + /// + /// Enables Cloud Service Mesh for this Revision. + /// Structure is documented below. + /// + [Input("serviceMesh")] + public Input? ServiceMesh { get; set; } + /// /// Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity /// diff --git a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateGetArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateGetArgs.cs index 5901871546..4a897639b6 100644 --- a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateGetArgs.cs +++ b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateGetArgs.cs @@ -94,6 +94,13 @@ public InputMap Labels [Input("serviceAccount")] public Input? ServiceAccount { get; set; } + /// + /// Enables Cloud Service Mesh for this Revision. + /// Structure is documented below. + /// + [Input("serviceMesh")] + public Input? ServiceMesh { get; set; } + /// /// Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity /// diff --git a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateServiceMeshArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateServiceMeshArgs.cs new file mode 100644 index 0000000000..b86f922d91 --- /dev/null +++ b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateServiceMeshArgs.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudRunV2.Inputs +{ + + public sealed class ServiceTemplateServiceMeshArgs : global::Pulumi.ResourceArgs + { + /// + /// The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + /// + /// - - - + /// + [Input("mesh")] + public Input? Mesh { get; set; } + + public ServiceTemplateServiceMeshArgs() + { + } + public static new ServiceTemplateServiceMeshArgs Empty => new ServiceTemplateServiceMeshArgs(); + } +} diff --git a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateServiceMeshGetArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateServiceMeshGetArgs.cs new file mode 100644 index 0000000000..163c6fdc30 --- /dev/null +++ b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateServiceMeshGetArgs.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudRunV2.Inputs +{ + + public sealed class ServiceTemplateServiceMeshGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + /// + /// - - - + /// + [Input("mesh")] + public Input? Mesh { get; set; } + + public ServiceTemplateServiceMeshGetArgs() + { + } + public static new ServiceTemplateServiceMeshGetArgs Empty => new ServiceTemplateServiceMeshGetArgs(); + } +} diff --git a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeArgs.cs index 3041cafcce..acd627cf3d 100644 --- a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeArgs.cs +++ b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeArgs.cs @@ -27,7 +27,7 @@ public sealed class ServiceTemplateVolumeArgs : global::Pulumi.ResourceArgs public Input? EmptyDir { get; set; } /// - /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. /// Structure is documented below. /// [Input("gcs")] diff --git a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeGetArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeGetArgs.cs index f621ddb581..7faf38daf4 100644 --- a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeGetArgs.cs +++ b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeGetArgs.cs @@ -27,7 +27,7 @@ public sealed class ServiceTemplateVolumeGetArgs : global::Pulumi.ResourceArgs public Input? EmptyDir { get; set; } /// - /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. /// Structure is documented below. 
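Per the doc updates above, Cloud Run v2 GCS (and NFS) volumes no longer call for `launchStage` to be set to `ALPHA`/`BETA`; GCSFuse still requires the gen2 execution environment. A minimal sketch under that reading; the `Bucket`, `ReadOnly`, and volume-mount field names are assumptions, since they do not appear in this patch.

```csharp
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var svc = new Gcp.CloudRunV2.Service("gcs-volume", new()
    {
        Name = "gcs-volume-service",
        Location = "us-central1",
        DeletionProtection = false,
        Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs
        {
            // GCSFuse volumes are only supported in the gen2 execution environment.
            ExecutionEnvironment = "EXECUTION_ENVIRONMENT_GEN2",
            Containers = new[]
            {
                new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs
                {
                    Image = "us-docker.pkg.dev/cloudrun/container/hello",
                    VolumeMounts = new[]
                    {
                        new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerVolumeMountArgs
                        {
                            Name = "bucket",
                            MountPath = "/mnt/bucket",
                        },
                    },
                },
            },
            Volumes = new[]
            {
                new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeArgs
                {
                    Name = "bucket",
                    // Bucket/ReadOnly are assumed field names for the GCS volume block.
                    Gcs = new Gcp.CloudRunV2.Inputs.ServiceTemplateVolumeGcsArgs
                    {
                        Bucket = "my-bucket",
                        ReadOnly = true,
                    },
                },
            },
        },
    });
});
```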
/// [Input("gcs")] diff --git a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeNfsArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeNfsArgs.cs index 4d97c3b7c8..c07647bfa3 100644 --- a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeNfsArgs.cs +++ b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeNfsArgs.cs @@ -20,8 +20,6 @@ public sealed class ServiceTemplateVolumeNfsArgs : global::Pulumi.ResourceArgs /// /// If true, mount the NFS volume as read only - /// - /// - - - /// [Input("readOnly")] public Input? ReadOnly { get; set; } diff --git a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeNfsGetArgs.cs b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeNfsGetArgs.cs index cf54edc08b..ccf23e919e 100644 --- a/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeNfsGetArgs.cs +++ b/sdk/dotnet/CloudRunV2/Inputs/ServiceTemplateVolumeNfsGetArgs.cs @@ -20,8 +20,6 @@ public sealed class ServiceTemplateVolumeNfsGetArgs : global::Pulumi.ResourceArg /// /// If true, mount the NFS volume as read only - /// - /// - - - /// [Input("readOnly")] public Input? ReadOnly { get; set; } diff --git a/sdk/dotnet/CloudRunV2/Outputs/GetJobTemplateTemplateVolumeResult.cs b/sdk/dotnet/CloudRunV2/Outputs/GetJobTemplateTemplateVolumeResult.cs index 57f0a7967b..2154ba60ba 100644 --- a/sdk/dotnet/CloudRunV2/Outputs/GetJobTemplateTemplateVolumeResult.cs +++ b/sdk/dotnet/CloudRunV2/Outputs/GetJobTemplateTemplateVolumeResult.cs @@ -22,7 +22,7 @@ public sealed class GetJobTemplateTemplateVolumeResult /// public readonly ImmutableArray EmptyDirs; /// - /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + /// Cloud Storage bucket mounted as a volume using GCSFuse. /// public readonly ImmutableArray Gcs; /// @@ -30,7 +30,7 @@ public sealed class GetJobTemplateTemplateVolumeResult /// public readonly string Name; /// - /// NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + /// NFS share mounted as a volume. /// public readonly ImmutableArray Nfs; /// diff --git a/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateResult.cs b/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateResult.cs index fc5ea61935..1b2ed37cbd 100644 --- a/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateResult.cs +++ b/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateResult.cs @@ -60,6 +60,10 @@ public sealed class GetServiceTemplateResult /// public readonly string ServiceAccount; /// + /// Enables Cloud Service Mesh for this Revision. + /// + public readonly ImmutableArray ServiceMeshes; + /// /// Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity /// public readonly bool SessionAffinity; @@ -98,6 +102,8 @@ private GetServiceTemplateResult( string serviceAccount, + ImmutableArray serviceMeshes, + bool sessionAffinity, string timeout, @@ -115,6 +121,7 @@ private GetServiceTemplateResult( Revision = revision; Scalings = scalings; ServiceAccount = serviceAccount; + ServiceMeshes = serviceMeshes; SessionAffinity = sessionAffinity; Timeout = timeout; Volumes = volumes; diff --git a/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateServiceMeshResult.cs b/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateServiceMeshResult.cs new file mode 100644 index 0000000000..094eff2017 --- /dev/null +++ b/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateServiceMeshResult.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudRunV2.Outputs +{ + + [OutputType] + public sealed class GetServiceTemplateServiceMeshResult + { + /// + /// The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + /// + public readonly string Mesh; + + [OutputConstructor] + private GetServiceTemplateServiceMeshResult(string mesh) + { + Mesh = mesh; + } + } +} diff --git a/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateVolumeResult.cs b/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateVolumeResult.cs index c8ae277871..7fabb1a0ae 100644 --- a/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateVolumeResult.cs +++ b/sdk/dotnet/CloudRunV2/Outputs/GetServiceTemplateVolumeResult.cs @@ -22,7 +22,7 @@ public sealed class GetServiceTemplateVolumeResult /// public readonly ImmutableArray EmptyDirs; /// - /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. /// public readonly ImmutableArray Gcs; /// diff --git a/sdk/dotnet/CloudRunV2/Outputs/JobTemplateTemplateVolume.cs b/sdk/dotnet/CloudRunV2/Outputs/JobTemplateTemplateVolume.cs index 60848a087f..5928a770e2 100644 --- a/sdk/dotnet/CloudRunV2/Outputs/JobTemplateTemplateVolume.cs +++ b/sdk/dotnet/CloudRunV2/Outputs/JobTemplateTemplateVolume.cs @@ -24,7 +24,7 @@ public sealed class JobTemplateTemplateVolume /// public readonly Outputs.JobTemplateTemplateVolumeEmptyDir? EmptyDir; /// - /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + /// Cloud Storage bucket mounted as a volume using GCSFuse. /// Structure is documented below. /// public readonly Outputs.JobTemplateTemplateVolumeGcs? Gcs; @@ -33,7 +33,7 @@ public sealed class JobTemplateTemplateVolume /// public readonly string Name; /// - /// NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + /// NFS share mounted as a volume. /// Structure is documented below. /// public readonly Outputs.JobTemplateTemplateVolumeNfs? Nfs; diff --git a/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplate.cs b/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplate.cs index 2248b2c3c4..91396bd4bd 100644 --- a/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplate.cs +++ b/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplate.cs @@ -60,6 +60,11 @@ public sealed class ServiceTemplate /// public readonly string? ServiceAccount; /// + /// Enables Cloud Service Mesh for this Revision. + /// Structure is documented below. + /// + public readonly Outputs.ServiceTemplateServiceMesh? ServiceMesh; + /// /// Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity /// public readonly bool? SessionAffinity; @@ -99,6 +104,8 @@ private ServiceTemplate( string? serviceAccount, + Outputs.ServiceTemplateServiceMesh? serviceMesh, + bool? sessionAffinity, string? 
timeout, @@ -116,6 +123,7 @@ private ServiceTemplate( Revision = revision; Scaling = scaling; ServiceAccount = serviceAccount; + ServiceMesh = serviceMesh; SessionAffinity = sessionAffinity; Timeout = timeout; Volumes = volumes; diff --git a/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateServiceMesh.cs b/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateServiceMesh.cs new file mode 100644 index 0000000000..80e73570ce --- /dev/null +++ b/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateServiceMesh.cs @@ -0,0 +1,29 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudRunV2.Outputs +{ + + [OutputType] + public sealed class ServiceTemplateServiceMesh + { + /// + /// The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + /// + /// - - - + /// + public readonly string? Mesh; + + [OutputConstructor] + private ServiceTemplateServiceMesh(string? mesh) + { + Mesh = mesh; + } + } +} diff --git a/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateVolume.cs b/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateVolume.cs index eebfe4f0a2..2d0104f11a 100644 --- a/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateVolume.cs +++ b/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateVolume.cs @@ -24,7 +24,7 @@ public sealed class ServiceTemplateVolume /// public readonly Outputs.ServiceTemplateVolumeEmptyDir? EmptyDir; /// - /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + /// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. /// Structure is documented below. /// public readonly Outputs.ServiceTemplateVolumeGcs? Gcs; diff --git a/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateVolumeNfs.cs b/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateVolumeNfs.cs index 303ba14f20..a1b5590dbd 100644 --- a/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateVolumeNfs.cs +++ b/sdk/dotnet/CloudRunV2/Outputs/ServiceTemplateVolumeNfs.cs @@ -19,8 +19,6 @@ public sealed class ServiceTemplateVolumeNfs public readonly string Path; /// /// If true, mount the NFS volume as read only - /// - /// - - - /// public readonly bool? 
ReadOnly; /// diff --git a/sdk/dotnet/CloudRunV2/Service.cs b/sdk/dotnet/CloudRunV2/Service.cs index 7c092bd5b1..d806511ba8 100644 --- a/sdk/dotnet/CloudRunV2/Service.cs +++ b/sdk/dotnet/CloudRunV2/Service.cs @@ -570,7 +570,6 @@ namespace Pulumi.Gcp.CloudRunV2 /// Name = "cloudrun-service", /// Location = "us-central1", /// DeletionProtection = false, - /// LaunchStage = "BETA", /// Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs /// { /// ExecutionEnvironment = "EXECUTION_ENVIRONMENT_GEN2", @@ -645,7 +644,6 @@ namespace Pulumi.Gcp.CloudRunV2 /// Location = "us-central1", /// DeletionProtection = false, /// Ingress = "INGRESS_TRAFFIC_ALL", - /// LaunchStage = "BETA", /// Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs /// { /// ExecutionEnvironment = "EXECUTION_ENVIRONMENT_GEN2", @@ -693,6 +691,63 @@ namespace Pulumi.Gcp.CloudRunV2 /// /// }); /// ``` + /// ### Cloudrunv2 Service Mesh + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// using Time = Pulumi.Time; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var mesh = new Gcp.NetworkServices.Mesh("mesh", new() + /// { + /// Name = "network-services-mesh", + /// }); + /// + /// var waitForMesh = new Time.Index.Sleep("wait_for_mesh", new() + /// { + /// CreateDuration = "1m", + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// mesh, + /// }, + /// }); + /// + /// var @default = new Gcp.CloudRunV2.Service("default", new() + /// { + /// Name = "cloudrun-service", + /// DeletionProtection = false, + /// Location = "us-central1", + /// LaunchStage = "BETA", + /// Template = new Gcp.CloudRunV2.Inputs.ServiceTemplateArgs + /// { + /// Containers = new[] + /// { + /// new Gcp.CloudRunV2.Inputs.ServiceTemplateContainerArgs + /// { + /// Image = "us-docker.pkg.dev/cloudrun/container/hello", + /// }, + /// }, + /// ServiceMesh = new Gcp.CloudRunV2.Inputs.ServiceTemplateServiceMeshArgs + /// { + /// Mesh = mesh.Id, + /// }, + /// }, + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// waitForMesh, + /// }, + /// }); + /// + /// }); + /// ``` /// /// ## Import /// diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetArgs.cs new file mode 100644 index 0000000000..753246b960 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetArgs.cs @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetArgs : global::Pulumi.ResourceArgs + { + [Input("headerOverrides")] + private InputList? _headerOverrides; + + /// + /// HTTP target headers. + /// This map contains the header field names and values. + /// Headers will be set when running the CreateTask and/or BufferTask. + /// These headers represent a subset of the headers that will be configured for the task's HTTP request. + /// Some HTTP request headers will be ignored or replaced. + /// Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + /// The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. 
+ /// Structure is documented below. + /// + public InputList HeaderOverrides + { + get => _headerOverrides ?? (_headerOverrides = new InputList()); + set => _headerOverrides = value; + } + + /// + /// The HTTP method to use for the request. + /// When specified, it overrides HttpRequest for the task. + /// Note that if the value is set to GET the body of the task will be ignored at execution time. + /// Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + /// + [Input("httpMethod")] + public Input? HttpMethod { get; set; } + + /// + /// If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + /// This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + /// Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + /// Structure is documented below. + /// + [Input("oauthToken")] + public Input? OauthToken { get; set; } + + /// + /// If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + /// This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + /// Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + /// Structure is documented below. + /// + [Input("oidcToken")] + public Input? OidcToken { get; set; } + + /// + /// URI override. + /// When specified, overrides the execution URI for all the tasks in the queue. + /// Structure is documented below. + /// + [Input("uriOverride")] + public Input? UriOverride { get; set; } + + public QueueHttpTargetArgs() + { + } + public static new QueueHttpTargetArgs Empty => new QueueHttpTargetArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetGetArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetGetArgs.cs new file mode 100644 index 0000000000..ed45bfa479 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetGetArgs.cs @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetGetArgs : global::Pulumi.ResourceArgs + { + [Input("headerOverrides")] + private InputList? _headerOverrides; + + /// + /// HTTP target headers. + /// This map contains the header field names and values. + /// Headers will be set when running the CreateTask and/or BufferTask. + /// These headers represent a subset of the headers that will be configured for the task's HTTP request. + /// Some HTTP request headers will be ignored or replaced. + /// Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + /// The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + /// Structure is documented below. + /// + public InputList HeaderOverrides + { + get => _headerOverrides ?? (_headerOverrides = new InputList()); + set => _headerOverrides = value; + } + + /// + /// The HTTP method to use for the request. 
+ /// When specified, it overrides HttpRequest for the task. + /// Note that if the value is set to GET the body of the task will be ignored at execution time. + /// Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + /// + [Input("httpMethod")] + public Input? HttpMethod { get; set; } + + /// + /// If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + /// This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + /// Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + /// Structure is documented below. + /// + [Input("oauthToken")] + public Input? OauthToken { get; set; } + + /// + /// If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + /// This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + /// Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + /// Structure is documented below. + /// + [Input("oidcToken")] + public Input? OidcToken { get; set; } + + /// + /// URI override. + /// When specified, overrides the execution URI for all the tasks in the queue. + /// Structure is documented below. + /// + [Input("uriOverride")] + public Input? UriOverride { get; set; } + + public QueueHttpTargetGetArgs() + { + } + public static new QueueHttpTargetGetArgs Empty => new QueueHttpTargetGetArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideArgs.cs new file mode 100644 index 0000000000..d7b9036f07 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetHeaderOverrideArgs : global::Pulumi.ResourceArgs + { + /// + /// Header embodying a key and a value. + /// Structure is documented below. + /// + [Input("header", required: true)] + public Input Header { get; set; } = null!; + + public QueueHttpTargetHeaderOverrideArgs() + { + } + public static new QueueHttpTargetHeaderOverrideArgs Empty => new QueueHttpTargetHeaderOverrideArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideGetArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideGetArgs.cs new file mode 100644 index 0000000000..c810c9a933 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideGetArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetHeaderOverrideGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Header embodying a key and a value. + /// Structure is documented below. + /// + [Input("header", required: true)] + public Input Header { get; set; } = null!; + + public QueueHttpTargetHeaderOverrideGetArgs() + { + } + public static new QueueHttpTargetHeaderOverrideGetArgs Empty => new QueueHttpTargetHeaderOverrideGetArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideHeaderArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideHeaderArgs.cs new file mode 100644 index 0000000000..3efb183c38 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideHeaderArgs.cs @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetHeaderOverrideHeaderArgs : global::Pulumi.ResourceArgs + { + /// + /// The Key of the header. + /// + [Input("key", required: true)] + public Input Key { get; set; } = null!; + + /// + /// The Value of the header. + /// + [Input("value", required: true)] + public Input Value { get; set; } = null!; + + public QueueHttpTargetHeaderOverrideHeaderArgs() + { + } + public static new QueueHttpTargetHeaderOverrideHeaderArgs Empty => new QueueHttpTargetHeaderOverrideHeaderArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideHeaderGetArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideHeaderGetArgs.cs new file mode 100644 index 0000000000..a1ebfc00e2 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetHeaderOverrideHeaderGetArgs.cs @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetHeaderOverrideHeaderGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The Key of the header. + /// + [Input("key", required: true)] + public Input Key { get; set; } = null!; + + /// + /// The Value of the header. + /// + [Input("value", required: true)] + public Input Value { get; set; } = null!; + + public QueueHttpTargetHeaderOverrideHeaderGetArgs() + { + } + public static new QueueHttpTargetHeaderOverrideHeaderGetArgs Empty => new QueueHttpTargetHeaderOverrideHeaderGetArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOauthTokenArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOauthTokenArgs.cs new file mode 100644 index 0000000000..e13894fbcb --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOauthTokenArgs.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetOauthTokenArgs : global::Pulumi.ResourceArgs + { + /// + /// OAuth scope to be used for generating OAuth access token. + /// If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + /// + [Input("scope")] + public Input? Scope { get; set; } + + /// + /// Service account email to be used for generating OAuth token. + /// The service account must be within the same project as the queue. + /// The caller must have iam.serviceAccounts.actAs permission for the service account. + /// + [Input("serviceAccountEmail", required: true)] + public Input ServiceAccountEmail { get; set; } = null!; + + public QueueHttpTargetOauthTokenArgs() + { + } + public static new QueueHttpTargetOauthTokenArgs Empty => new QueueHttpTargetOauthTokenArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOauthTokenGetArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOauthTokenGetArgs.cs new file mode 100644 index 0000000000..1513704431 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOauthTokenGetArgs.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetOauthTokenGetArgs : global::Pulumi.ResourceArgs + { + /// + /// OAuth scope to be used for generating OAuth access token. + /// If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + /// + [Input("scope")] + public Input? Scope { get; set; } + + /// + /// Service account email to be used for generating OAuth token. + /// The service account must be within the same project as the queue. + /// The caller must have iam.serviceAccounts.actAs permission for the service account. + /// + [Input("serviceAccountEmail", required: true)] + public Input ServiceAccountEmail { get; set; } = null!; + + public QueueHttpTargetOauthTokenGetArgs() + { + } + public static new QueueHttpTargetOauthTokenGetArgs Empty => new QueueHttpTargetOauthTokenGetArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOidcTokenArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOidcTokenArgs.cs new file mode 100644 index 0000000000..60a5745e94 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOidcTokenArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetOidcTokenArgs : global::Pulumi.ResourceArgs + { + /// + /// Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + /// + [Input("audience")] + public Input? Audience { get; set; } + + /// + /// Service account email to be used for generating OIDC token. 
+ /// The service account must be within the same project as the queue. + /// The caller must have iam.serviceAccounts.actAs permission for the service account. + /// + [Input("serviceAccountEmail", required: true)] + public Input ServiceAccountEmail { get; set; } = null!; + + public QueueHttpTargetOidcTokenArgs() + { + } + public static new QueueHttpTargetOidcTokenArgs Empty => new QueueHttpTargetOidcTokenArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOidcTokenGetArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOidcTokenGetArgs.cs new file mode 100644 index 0000000000..c753466b41 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetOidcTokenGetArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetOidcTokenGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + /// + [Input("audience")] + public Input? Audience { get; set; } + + /// + /// Service account email to be used for generating OIDC token. + /// The service account must be within the same project as the queue. + /// The caller must have iam.serviceAccounts.actAs permission for the service account. + /// + [Input("serviceAccountEmail", required: true)] + public Input ServiceAccountEmail { get; set; } = null!; + + public QueueHttpTargetOidcTokenGetArgs() + { + } + public static new QueueHttpTargetOidcTokenGetArgs Empty => new QueueHttpTargetOidcTokenGetArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideArgs.cs new file mode 100644 index 0000000000..db42252237 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideArgs.cs @@ -0,0 +1,73 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetUriOverrideArgs : global::Pulumi.ResourceArgs + { + /// + /// Host override. + /// When specified, replaces the host part of the task URL. + /// For example, if the task URL is "https://www.google.com", and host value + /// is set to "example.net", the overridden URI will be changed to "https://example.net". + /// Host value cannot be an empty string (INVALID_ARGUMENT). + /// + [Input("host")] + public Input? Host { get; set; } + + /// + /// URI path. + /// When specified, replaces the existing path of the task URL. + /// Setting the path value to an empty string clears the URI path segment. + /// Structure is documented below. + /// + [Input("pathOverride")] + public Input? PathOverride { get; set; } + + /// + /// Port override. + /// When specified, replaces the port part of the task URI. + /// For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. 
+ /// Note that the port value must be a positive integer. + /// Setting the port to 0 (Zero) clears the URI port. + /// + [Input("port")] + public Input? Port { get; set; } + + /// + /// URI query. + /// When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + /// Structure is documented below. + /// + [Input("queryOverride")] + public Input? QueryOverride { get; set; } + + /// + /// Scheme override. + /// When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + /// Possible values are: `HTTP`, `HTTPS`. + /// + [Input("scheme")] + public Input? Scheme { get; set; } + + /// + /// URI Override Enforce Mode + /// When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + /// Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + /// + [Input("uriOverrideEnforceMode")] + public Input? UriOverrideEnforceMode { get; set; } + + public QueueHttpTargetUriOverrideArgs() + { + } + public static new QueueHttpTargetUriOverrideArgs Empty => new QueueHttpTargetUriOverrideArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideGetArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideGetArgs.cs new file mode 100644 index 0000000000..9756c12471 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideGetArgs.cs @@ -0,0 +1,73 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetUriOverrideGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Host override. + /// When specified, replaces the host part of the task URL. + /// For example, if the task URL is "https://www.google.com", and host value + /// is set to "example.net", the overridden URI will be changed to "https://example.net". + /// Host value cannot be an empty string (INVALID_ARGUMENT). + /// + [Input("host")] + public Input? Host { get; set; } + + /// + /// URI path. + /// When specified, replaces the existing path of the task URL. + /// Setting the path value to an empty string clears the URI path segment. + /// Structure is documented below. + /// + [Input("pathOverride")] + public Input? PathOverride { get; set; } + + /// + /// Port override. + /// When specified, replaces the port part of the task URI. + /// For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + /// Note that the port value must be a positive integer. + /// Setting the port to 0 (Zero) clears the URI port. + /// + [Input("port")] + public Input? Port { get; set; } + + /// + /// URI query. + /// When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + /// Structure is documented below. + /// + [Input("queryOverride")] + public Input? QueryOverride { get; set; } + + /// + /// Scheme override. + /// When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + /// Possible values are: `HTTP`, `HTTPS`. + /// + [Input("scheme")] + public Input? Scheme { get; set; } + + /// + /// URI Override Enforce Mode + /// When specified, determines the Target UriOverride mode. 
If not specified, it defaults to ALWAYS. + /// Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + /// + [Input("uriOverrideEnforceMode")] + public Input? UriOverrideEnforceMode { get; set; } + + public QueueHttpTargetUriOverrideGetArgs() + { + } + public static new QueueHttpTargetUriOverrideGetArgs Empty => new QueueHttpTargetUriOverrideGetArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverridePathOverrideArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverridePathOverrideArgs.cs new file mode 100644 index 0000000000..7892318157 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverridePathOverrideArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetUriOverridePathOverrideArgs : global::Pulumi.ResourceArgs + { + /// + /// The URI path (e.g., /users/1234). Default is an empty string. + /// + [Input("path")] + public Input? Path { get; set; } + + public QueueHttpTargetUriOverridePathOverrideArgs() + { + } + public static new QueueHttpTargetUriOverridePathOverrideArgs Empty => new QueueHttpTargetUriOverridePathOverrideArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverridePathOverrideGetArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverridePathOverrideGetArgs.cs new file mode 100644 index 0000000000..a331d7bf09 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverridePathOverrideGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetUriOverridePathOverrideGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The URI path (e.g., /users/1234). Default is an empty string. + /// + [Input("path")] + public Input? Path { get; set; } + + public QueueHttpTargetUriOverridePathOverrideGetArgs() + { + } + public static new QueueHttpTargetUriOverridePathOverrideGetArgs Empty => new QueueHttpTargetUriOverridePathOverrideGetArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideQueryOverrideArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideQueryOverrideArgs.cs new file mode 100644 index 0000000000..b78cf240fd --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideQueryOverrideArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetUriOverrideQueryOverrideArgs : global::Pulumi.ResourceArgs + { + /// + /// The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. 
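The new queue-level HTTP target can be wired up roughly as below, combining a URI override with an OIDC token as documented above. This is a sketch: the nested `QueueHttpTarget*` argument types come from this patch, but the `HttpTarget` property name on the `Queue` resource and the remaining queue arguments are assumptions.

```csharp
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var queue = new Gcp.CloudTasks.Queue("queue", new()
    {
        Name = "http-target-queue",
        Location = "us-central1",
        // HttpTarget is the assumed attribute name for the block added in this release.
        HttpTarget = new Gcp.CloudTasks.Inputs.QueueHttpTargetArgs
        {
            HttpMethod = "POST",
            UriOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideArgs
            {
                Scheme = "HTTPS",
                Host = "example.net",
                PathOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverridePathOverrideArgs
                {
                    Path = "/users/1234",
                },
                QueryOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideQueryOverrideArgs
                {
                    QueryParams = "qparam1=123&qparam2=456",
                },
                // Defaults to ALWAYS when unspecified; IF_NOT_EXISTS is the other documented mode.
                UriOverrideEnforceMode = "IF_NOT_EXISTS",
            },
            OidcToken = new Gcp.CloudTasks.Inputs.QueueHttpTargetOidcTokenArgs
            {
                // Both the service account email and the audience must be set for
                // the queue-level authorization override, per the docs above.
                ServiceAccountEmail = "tasks-invoker@my-project.iam.gserviceaccount.com",
                Audience = "https://example.net",
            },
        },
    });
});
```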
+ /// + [Input("queryParams")] + public Input? QueryParams { get; set; } + + public QueueHttpTargetUriOverrideQueryOverrideArgs() + { + } + public static new QueueHttpTargetUriOverrideQueryOverrideArgs Empty => new QueueHttpTargetUriOverrideQueryOverrideArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideQueryOverrideGetArgs.cs b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideQueryOverrideGetArgs.cs new file mode 100644 index 0000000000..8b6024c36f --- /dev/null +++ b/sdk/dotnet/CloudTasks/Inputs/QueueHttpTargetUriOverrideQueryOverrideGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Inputs +{ + + public sealed class QueueHttpTargetUriOverrideQueryOverrideGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + /// + [Input("queryParams")] + public Input? QueryParams { get; set; } + + public QueueHttpTargetUriOverrideQueryOverrideGetArgs() + { + } + public static new QueueHttpTargetUriOverrideQueryOverrideGetArgs Empty => new QueueHttpTargetUriOverrideQueryOverrideGetArgs(); + } +} diff --git a/sdk/dotnet/CloudTasks/Outputs/QueueHttpTarget.cs b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTarget.cs new file mode 100644 index 0000000000..90e30fc6d6 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTarget.cs @@ -0,0 +1,74 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Outputs +{ + + [OutputType] + public sealed class QueueHttpTarget + { + /// + /// HTTP target headers. + /// This map contains the header field names and values. + /// Headers will be set when running the CreateTask and/or BufferTask. + /// These headers represent a subset of the headers that will be configured for the task's HTTP request. + /// Some HTTP request headers will be ignored or replaced. + /// Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + /// The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + /// Structure is documented below. + /// + public readonly ImmutableArray HeaderOverrides; + /// + /// The HTTP method to use for the request. + /// When specified, it overrides HttpRequest for the task. + /// Note that if the value is set to GET the body of the task will be ignored at execution time. + /// Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + /// + public readonly string? HttpMethod; + /// + /// If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + /// This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + /// Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. 
+ /// Structure is documented below. + /// + public readonly Outputs.QueueHttpTargetOauthToken? OauthToken; + /// + /// If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + /// This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + /// Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + /// Structure is documented below. + /// + public readonly Outputs.QueueHttpTargetOidcToken? OidcToken; + /// + /// URI override. + /// When specified, overrides the execution URI for all the tasks in the queue. + /// Structure is documented below. + /// + public readonly Outputs.QueueHttpTargetUriOverride? UriOverride; + + [OutputConstructor] + private QueueHttpTarget( + ImmutableArray headerOverrides, + + string? httpMethod, + + Outputs.QueueHttpTargetOauthToken? oauthToken, + + Outputs.QueueHttpTargetOidcToken? oidcToken, + + Outputs.QueueHttpTargetUriOverride? uriOverride) + { + HeaderOverrides = headerOverrides; + HttpMethod = httpMethod; + OauthToken = oauthToken; + OidcToken = oidcToken; + UriOverride = uriOverride; + } + } +} diff --git a/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetHeaderOverride.cs b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetHeaderOverride.cs new file mode 100644 index 0000000000..5e3cc0ea84 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetHeaderOverride.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Outputs +{ + + [OutputType] + public sealed class QueueHttpTargetHeaderOverride + { + /// + /// Header embodying a key and a value. + /// Structure is documented below. + /// + public readonly Outputs.QueueHttpTargetHeaderOverrideHeader Header; + + [OutputConstructor] + private QueueHttpTargetHeaderOverride(Outputs.QueueHttpTargetHeaderOverrideHeader header) + { + Header = header; + } + } +} diff --git a/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetHeaderOverrideHeader.cs b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetHeaderOverrideHeader.cs new file mode 100644 index 0000000000..0eeb09ecd3 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetHeaderOverrideHeader.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Outputs +{ + + [OutputType] + public sealed class QueueHttpTargetHeaderOverrideHeader + { + /// + /// The Key of the header. + /// + public readonly string Key; + /// + /// The Value of the header. 
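Header overrides and the OAuth token variant follow the same shapes; a short sketch, again assuming the queue exposes the block as `HttpTarget` (the wrapper property is not shown in this patch).

```csharp
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var queue = new Gcp.CloudTasks.Queue("header-queue", new()
    {
        Name = "header-override-queue",
        Location = "us-central1",
        HttpTarget = new Gcp.CloudTasks.Inputs.QueueHttpTargetArgs
        {
            // Each override wraps a single key/value header applied to every task in the queue.
            HeaderOverrides = new[]
            {
                new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideArgs
                {
                    Header = new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideHeaderArgs
                    {
                        Key = "X-Environment",
                        Value = "staging",
                    },
                },
            },
            // OAuth tokens are intended for *.googleapis.com targets; both the
            // service account email and the scope must be set at queue level.
            OauthToken = new Gcp.CloudTasks.Inputs.QueueHttpTargetOauthTokenArgs
            {
                ServiceAccountEmail = "tasks-invoker@my-project.iam.gserviceaccount.com",
                Scope = "https://www.googleapis.com/auth/cloud-platform",
            },
        },
    });
});
```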
+ /// + public readonly string Value; + + [OutputConstructor] + private QueueHttpTargetHeaderOverrideHeader( + string key, + + string value) + { + Key = key; + Value = value; + } + } +} diff --git a/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetOauthToken.cs b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetOauthToken.cs new file mode 100644 index 0000000000..aa304594df --- /dev/null +++ b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetOauthToken.cs @@ -0,0 +1,38 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Outputs +{ + + [OutputType] + public sealed class QueueHttpTargetOauthToken + { + /// + /// OAuth scope to be used for generating OAuth access token. + /// If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + /// + public readonly string? Scope; + /// + /// Service account email to be used for generating OAuth token. + /// The service account must be within the same project as the queue. + /// The caller must have iam.serviceAccounts.actAs permission for the service account. + /// + public readonly string ServiceAccountEmail; + + [OutputConstructor] + private QueueHttpTargetOauthToken( + string? scope, + + string serviceAccountEmail) + { + Scope = scope; + ServiceAccountEmail = serviceAccountEmail; + } + } +} diff --git a/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetOidcToken.cs b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetOidcToken.cs new file mode 100644 index 0000000000..efff3fd6f3 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetOidcToken.cs @@ -0,0 +1,37 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Outputs +{ + + [OutputType] + public sealed class QueueHttpTargetOidcToken + { + /// + /// Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + /// + public readonly string? Audience; + /// + /// Service account email to be used for generating OIDC token. + /// The service account must be within the same project as the queue. + /// The caller must have iam.serviceAccounts.actAs permission for the service account. + /// + public readonly string ServiceAccountEmail; + + [OutputConstructor] + private QueueHttpTargetOidcToken( + string? audience, + + string serviceAccountEmail) + { + Audience = audience; + ServiceAccountEmail = serviceAccountEmail; + } + } +} diff --git a/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverride.cs b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverride.cs new file mode 100644 index 0000000000..f8607c5ff8 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverride.cs @@ -0,0 +1,80 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Outputs +{ + + [OutputType] + public sealed class QueueHttpTargetUriOverride + { + /// + /// Host override. + /// When specified, replaces the host part of the task URL. + /// For example, if the task URL is "https://www.google.com", and host value + /// is set to "example.net", the overridden URI will be changed to "https://example.net". + /// Host value cannot be an empty string (INVALID_ARGUMENT). + /// + public readonly string? Host; + /// + /// URI path. + /// When specified, replaces the existing path of the task URL. + /// Setting the path value to an empty string clears the URI path segment. + /// Structure is documented below. + /// + public readonly Outputs.QueueHttpTargetUriOverridePathOverride? PathOverride; + /// + /// Port override. + /// When specified, replaces the port part of the task URI. + /// For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + /// Note that the port value must be a positive integer. + /// Setting the port to 0 (Zero) clears the URI port. + /// + public readonly string? Port; + /// + /// URI query. + /// When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + /// Structure is documented below. + /// + public readonly Outputs.QueueHttpTargetUriOverrideQueryOverride? QueryOverride; + /// + /// Scheme override. + /// When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + /// Possible values are: `HTTP`, `HTTPS`. + /// + public readonly string? Scheme; + /// + /// URI Override Enforce Mode + /// When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + /// Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + /// + public readonly string? UriOverrideEnforceMode; + + [OutputConstructor] + private QueueHttpTargetUriOverride( + string? host, + + Outputs.QueueHttpTargetUriOverridePathOverride? pathOverride, + + string? port, + + Outputs.QueueHttpTargetUriOverrideQueryOverride? queryOverride, + + string? scheme, + + string? uriOverrideEnforceMode) + { + Host = host; + PathOverride = pathOverride; + Port = port; + QueryOverride = queryOverride; + Scheme = scheme; + UriOverrideEnforceMode = uriOverrideEnforceMode; + } + } +} diff --git a/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverridePathOverride.cs b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverridePathOverride.cs new file mode 100644 index 0000000000..50bf7f32ea --- /dev/null +++ b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverridePathOverride.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Outputs +{ + + [OutputType] + public sealed class QueueHttpTargetUriOverridePathOverride + { + /// + /// The URI path (e.g., /users/1234). Default is an empty string. + /// + public readonly string? Path; + + [OutputConstructor] + private QueueHttpTargetUriOverridePathOverride(string? 
path) + { + Path = path; + } + } +} diff --git a/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverrideQueryOverride.cs b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverrideQueryOverride.cs new file mode 100644 index 0000000000..8b37f13c43 --- /dev/null +++ b/sdk/dotnet/CloudTasks/Outputs/QueueHttpTargetUriOverrideQueryOverride.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.CloudTasks.Outputs +{ + + [OutputType] + public sealed class QueueHttpTargetUriOverrideQueryOverride + { + /// + /// The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + /// + public readonly string? QueryParams; + + [OutputConstructor] + private QueueHttpTargetUriOverrideQueryOverride(string? queryParams) + { + QueryParams = queryParams; + } + } +} diff --git a/sdk/dotnet/CloudTasks/Queue.cs b/sdk/dotnet/CloudTasks/Queue.cs index 14754b45d6..40fe41a746 100644 --- a/sdk/dotnet/CloudTasks/Queue.cs +++ b/sdk/dotnet/CloudTasks/Queue.cs @@ -73,6 +73,140 @@ namespace Pulumi.Gcp.CloudTasks /// /// }); /// ``` + /// ### Cloud Tasks Queue Http Target Oidc + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var oidcServiceAccount = new Gcp.ServiceAccount.Account("oidc_service_account", new() + /// { + /// AccountId = "example-oidc", + /// DisplayName = "Tasks Queue OIDC Service Account", + /// }); + /// + /// var httpTargetOidc = new Gcp.CloudTasks.Queue("http_target_oidc", new() + /// { + /// Name = "cloud-tasks-queue-http-target-oidc", + /// Location = "us-central1", + /// HttpTarget = new Gcp.CloudTasks.Inputs.QueueHttpTargetArgs + /// { + /// HttpMethod = "POST", + /// UriOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideArgs + /// { + /// Scheme = "HTTPS", + /// Host = "oidc.example.com", + /// Port = "8443", + /// PathOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverridePathOverrideArgs + /// { + /// Path = "/users/1234", + /// }, + /// QueryOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideQueryOverrideArgs + /// { + /// QueryParams = "qparam1=123&qparam2=456", + /// }, + /// UriOverrideEnforceMode = "IF_NOT_EXISTS", + /// }, + /// HeaderOverrides = new[] + /// { + /// new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideArgs + /// { + /// Header = new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideHeaderArgs + /// { + /// Key = "AddSomethingElse", + /// Value = "MyOtherValue", + /// }, + /// }, + /// new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideArgs + /// { + /// Header = new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideHeaderArgs + /// { + /// Key = "AddMe", + /// Value = "MyValue", + /// }, + /// }, + /// }, + /// OidcToken = new Gcp.CloudTasks.Inputs.QueueHttpTargetOidcTokenArgs + /// { + /// ServiceAccountEmail = oidcServiceAccount.Email, + /// Audience = "https://oidc.example.com", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// ### Cloud Tasks Queue Http Target Oauth + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() 
=> + /// { + /// var oauthServiceAccount = new Gcp.ServiceAccount.Account("oauth_service_account", new() + /// { + /// AccountId = "example-oauth", + /// DisplayName = "Tasks Queue OAuth Service Account", + /// }); + /// + /// var httpTargetOauth = new Gcp.CloudTasks.Queue("http_target_oauth", new() + /// { + /// Name = "cloud-tasks-queue-http-target-oauth", + /// Location = "us-central1", + /// HttpTarget = new Gcp.CloudTasks.Inputs.QueueHttpTargetArgs + /// { + /// HttpMethod = "POST", + /// UriOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideArgs + /// { + /// Scheme = "HTTPS", + /// Host = "oauth.example.com", + /// Port = "8443", + /// PathOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverridePathOverrideArgs + /// { + /// Path = "/users/1234", + /// }, + /// QueryOverride = new Gcp.CloudTasks.Inputs.QueueHttpTargetUriOverrideQueryOverrideArgs + /// { + /// QueryParams = "qparam1=123&qparam2=456", + /// }, + /// UriOverrideEnforceMode = "IF_NOT_EXISTS", + /// }, + /// HeaderOverrides = new[] + /// { + /// new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideArgs + /// { + /// Header = new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideHeaderArgs + /// { + /// Key = "AddSomethingElse", + /// Value = "MyOtherValue", + /// }, + /// }, + /// new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideArgs + /// { + /// Header = new Gcp.CloudTasks.Inputs.QueueHttpTargetHeaderOverrideHeaderArgs + /// { + /// Key = "AddMe", + /// Value = "MyValue", + /// }, + /// }, + /// }, + /// OauthToken = new Gcp.CloudTasks.Inputs.QueueHttpTargetOauthTokenArgs + /// { + /// ServiceAccountEmail = oauthServiceAccount.Email, + /// Scope = "openid https://www.googleapis.com/auth/userinfo.email", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` /// /// ## Import /// @@ -109,6 +243,13 @@ public partial class Queue : global::Pulumi.CustomResource [Output("appEngineRoutingOverride")] public Output AppEngineRoutingOverride { get; private set; } = null!; + /// + /// Modifies HTTP target for HTTP tasks. + /// Structure is documented below. + /// + [Output("httpTarget")] + public Output HttpTarget { get; private set; } = null!; + /// /// The location of the queue /// @@ -212,6 +353,13 @@ public sealed class QueueArgs : global::Pulumi.ResourceArgs [Input("appEngineRoutingOverride")] public Input? AppEngineRoutingOverride { get; set; } + /// + /// Modifies HTTP target for HTTP tasks. + /// Structure is documented below. + /// + [Input("httpTarget")] + public Input? HttpTarget { get; set; } + /// /// The location of the queue /// @@ -277,6 +425,13 @@ public sealed class QueueState : global::Pulumi.ResourceArgs [Input("appEngineRoutingOverride")] public Input? AppEngineRoutingOverride { get; set; } + /// + /// Modifies HTTP target for HTTP tasks. + /// Structure is documented below. + /// + [Input("httpTarget")] + public Input? HttpTarget { get; set; } + /// /// The location of the queue /// diff --git a/sdk/dotnet/Compute/GetInstance.cs b/sdk/dotnet/Compute/GetInstance.cs index 46be3949a4..319cacf244 100644 --- a/sdk/dotnet/Compute/GetInstance.cs +++ b/sdk/dotnet/Compute/GetInstance.cs @@ -165,7 +165,7 @@ public sealed class GetInstanceResult /// public readonly string CpuPlatform; /// - /// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. 
For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + /// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). /// public readonly string CurrentStatus; /// diff --git a/sdk/dotnet/Compute/HealthCheck.cs b/sdk/dotnet/Compute/HealthCheck.cs index 27cb0dbc84..6fdabc3121 100644 --- a/sdk/dotnet/Compute/HealthCheck.cs +++ b/sdk/dotnet/Compute/HealthCheck.cs @@ -377,6 +377,93 @@ namespace Pulumi.Gcp.Compute /// /// }); /// ``` + /// ### Compute Health Check Http Source Regions + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var http_health_check_with_source_regions = new Gcp.Compute.HealthCheck("http-health-check-with-source-regions", new() + /// { + /// Name = "http-health-check", + /// CheckIntervalSec = 30, + /// HttpHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpHealthCheckArgs + /// { + /// Port = 80, + /// PortSpecification = "USE_FIXED_PORT", + /// }, + /// SourceRegions = new[] + /// { + /// "us-west1", + /// "us-central1", + /// "us-east5", + /// }, + /// }); + /// + /// }); + /// ``` + /// ### Compute Health Check Https Source Regions + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var https_health_check_with_source_regions = new Gcp.Compute.HealthCheck("https-health-check-with-source-regions", new() + /// { + /// Name = "https-health-check", + /// CheckIntervalSec = 30, + /// HttpsHealthCheck = new Gcp.Compute.Inputs.HealthCheckHttpsHealthCheckArgs + /// { + /// Port = 80, + /// PortSpecification = "USE_FIXED_PORT", + /// }, + /// SourceRegions = new[] + /// { + /// "us-west1", + /// "us-central1", + /// "us-east5", + /// }, + /// }); + /// + /// }); + /// ``` + /// ### Compute Health Check Tcp Source Regions + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var tcp_health_check_with_source_regions = new Gcp.Compute.HealthCheck("tcp-health-check-with-source-regions", new() + /// { + /// Name = "tcp-health-check", + /// CheckIntervalSec = 30, + /// TcpHealthCheck = new Gcp.Compute.Inputs.HealthCheckTcpHealthCheckArgs + /// { + /// Port = 80, + /// PortSpecification = "USE_FIXED_PORT", + /// }, + /// SourceRegions = new[] + /// { + /// "us-west1", + /// "us-central1", + /// "us-east5", + /// }, + /// }); + /// + /// }); + /// ``` /// /// ## Import /// diff --git a/sdk/dotnet/Compute/Inputs/FirewallAllowArgs.cs b/sdk/dotnet/Compute/Inputs/FirewallAllowArgs.cs index 5a33810ebc..8a3ec0a656 100644 --- a/sdk/dotnet/Compute/Inputs/FirewallAllowArgs.cs +++ b/sdk/dotnet/Compute/Inputs/FirewallAllowArgs.cs @@ -20,7 +20,7 @@ public sealed class FirewallAllowArgs : global::Pulumi.ResourceArgs /// is only applicable for UDP or TCP protocol. Each entry must be /// either an integer or a range. If not specified, this rule /// applies to connections through any port. 
- /// Example inputs include: ["22"], ["80","443"], and + /// Example inputs include: [22], [80, 443], and /// ["12345-12349"]. /// public InputList Ports diff --git a/sdk/dotnet/Compute/Inputs/FirewallAllowGetArgs.cs b/sdk/dotnet/Compute/Inputs/FirewallAllowGetArgs.cs index e9d7a2fd73..7260502f2e 100644 --- a/sdk/dotnet/Compute/Inputs/FirewallAllowGetArgs.cs +++ b/sdk/dotnet/Compute/Inputs/FirewallAllowGetArgs.cs @@ -20,7 +20,7 @@ public sealed class FirewallAllowGetArgs : global::Pulumi.ResourceArgs /// is only applicable for UDP or TCP protocol. Each entry must be /// either an integer or a range. If not specified, this rule /// applies to connections through any port. - /// Example inputs include: ["22"], ["80","443"], and + /// Example inputs include: [22], [80, 443], and /// ["12345-12349"]. /// public InputList Ports diff --git a/sdk/dotnet/Compute/Inputs/FirewallDenyArgs.cs b/sdk/dotnet/Compute/Inputs/FirewallDenyArgs.cs index ff75067289..d183afe1fa 100644 --- a/sdk/dotnet/Compute/Inputs/FirewallDenyArgs.cs +++ b/sdk/dotnet/Compute/Inputs/FirewallDenyArgs.cs @@ -20,7 +20,7 @@ public sealed class FirewallDenyArgs : global::Pulumi.ResourceArgs /// is only applicable for UDP or TCP protocol. Each entry must be /// either an integer or a range. If not specified, this rule /// applies to connections through any port. - /// Example inputs include: ["22"], ["80","443"], and + /// Example inputs include: [22], [80, 443], and /// ["12345-12349"]. /// public InputList Ports diff --git a/sdk/dotnet/Compute/Inputs/FirewallDenyGetArgs.cs b/sdk/dotnet/Compute/Inputs/FirewallDenyGetArgs.cs index 8ef0dbb1a6..b0e82821ed 100644 --- a/sdk/dotnet/Compute/Inputs/FirewallDenyGetArgs.cs +++ b/sdk/dotnet/Compute/Inputs/FirewallDenyGetArgs.cs @@ -20,7 +20,7 @@ public sealed class FirewallDenyGetArgs : global::Pulumi.ResourceArgs /// is only applicable for UDP or TCP protocol. Each entry must be /// either an integer or a range. If not specified, this rule /// applies to connections through any port. - /// Example inputs include: ["22"], ["80","443"], and + /// Example inputs include: [22], [80, 443], and /// ["12345-12349"]. /// public InputList Ports diff --git a/sdk/dotnet/Compute/Inputs/InstanceBootDiskArgs.cs b/sdk/dotnet/Compute/Inputs/InstanceBootDiskArgs.cs index 387da64c82..12a7644c87 100644 --- a/sdk/dotnet/Compute/Inputs/InstanceBootDiskArgs.cs +++ b/sdk/dotnet/Compute/Inputs/InstanceBootDiskArgs.cs @@ -62,6 +62,12 @@ public Input? DiskEncryptionKeyRaw [Input("initializeParams")] public Input? InitializeParams { get; set; } + /// + /// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + /// + [Input("interface")] + public Input? Interface { get; set; } + /// /// The self_link of the encryption key that is /// stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link` diff --git a/sdk/dotnet/Compute/Inputs/InstanceBootDiskGetArgs.cs b/sdk/dotnet/Compute/Inputs/InstanceBootDiskGetArgs.cs index 82e5fe31b5..284125b9f2 100644 --- a/sdk/dotnet/Compute/Inputs/InstanceBootDiskGetArgs.cs +++ b/sdk/dotnet/Compute/Inputs/InstanceBootDiskGetArgs.cs @@ -62,6 +62,12 @@ public Input? DiskEncryptionKeyRaw [Input("initializeParams")] public Input? InitializeParams { get; set; } + /// + /// The disk interface used for attaching this disk. One of SCSI or NVME. 
(This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + /// + [Input("interface")] + public Input? Interface { get; set; } + /// /// The self_link of the encryption key that is /// stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link` diff --git a/sdk/dotnet/Compute/Inputs/InstanceFromMachineImageBootDiskArgs.cs b/sdk/dotnet/Compute/Inputs/InstanceFromMachineImageBootDiskArgs.cs index 9abf4bef4a..d4aa344a6c 100644 --- a/sdk/dotnet/Compute/Inputs/InstanceFromMachineImageBootDiskArgs.cs +++ b/sdk/dotnet/Compute/Inputs/InstanceFromMachineImageBootDiskArgs.cs @@ -52,6 +52,12 @@ public Input? DiskEncryptionKeyRaw [Input("initializeParams")] public Input? InitializeParams { get; set; } + /// + /// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + /// + [Input("interface")] + public Input? Interface { get; set; } + /// /// The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. /// diff --git a/sdk/dotnet/Compute/Inputs/InstanceFromMachineImageBootDiskGetArgs.cs b/sdk/dotnet/Compute/Inputs/InstanceFromMachineImageBootDiskGetArgs.cs index 2040f0be0d..fc99e4da4d 100644 --- a/sdk/dotnet/Compute/Inputs/InstanceFromMachineImageBootDiskGetArgs.cs +++ b/sdk/dotnet/Compute/Inputs/InstanceFromMachineImageBootDiskGetArgs.cs @@ -52,6 +52,12 @@ public Input? DiskEncryptionKeyRaw [Input("initializeParams")] public Input? InitializeParams { get; set; } + /// + /// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + /// + [Input("interface")] + public Input? Interface { get; set; } + /// /// The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. /// diff --git a/sdk/dotnet/Compute/Inputs/InstanceFromTemplateBootDiskArgs.cs b/sdk/dotnet/Compute/Inputs/InstanceFromTemplateBootDiskArgs.cs index 6b4df72183..aa14598c2c 100644 --- a/sdk/dotnet/Compute/Inputs/InstanceFromTemplateBootDiskArgs.cs +++ b/sdk/dotnet/Compute/Inputs/InstanceFromTemplateBootDiskArgs.cs @@ -52,6 +52,12 @@ public Input? DiskEncryptionKeyRaw [Input("initializeParams")] public Input? InitializeParams { get; set; } + /// + /// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + /// + [Input("interface")] + public Input? Interface { get; set; } + /// /// The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. /// diff --git a/sdk/dotnet/Compute/Inputs/InstanceFromTemplateBootDiskGetArgs.cs b/sdk/dotnet/Compute/Inputs/InstanceFromTemplateBootDiskGetArgs.cs index 0e65363b00..1a05037c3d 100644 --- a/sdk/dotnet/Compute/Inputs/InstanceFromTemplateBootDiskGetArgs.cs +++ b/sdk/dotnet/Compute/Inputs/InstanceFromTemplateBootDiskGetArgs.cs @@ -52,6 +52,12 @@ public Input? DiskEncryptionKeyRaw [Input("initializeParams")] public Input? 
InitializeParams { get; set; } + /// + /// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + /// + [Input("interface")] + public Input? Interface { get; set; } + /// /// The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. /// diff --git a/sdk/dotnet/Compute/Inputs/InstanceNetworkInterfaceArgs.cs b/sdk/dotnet/Compute/Inputs/InstanceNetworkInterfaceArgs.cs index 2fafee2fda..125f61ae43 100644 --- a/sdk/dotnet/Compute/Inputs/InstanceNetworkInterfaceArgs.cs +++ b/sdk/dotnet/Compute/Inputs/InstanceNetworkInterfaceArgs.cs @@ -137,7 +137,7 @@ public InputList Ipv6Access /// /// The project in which the subnetwork belongs. - /// If the `subnetwork` is a self_link, this field is ignored in favor of the project + /// If the `subnetwork` is a self_link, this field is set to the project /// defined in the subnetwork self_link. If the `subnetwork` is a name and this /// field is not provided, the provider project is used. /// diff --git a/sdk/dotnet/Compute/Inputs/InstanceNetworkInterfaceGetArgs.cs b/sdk/dotnet/Compute/Inputs/InstanceNetworkInterfaceGetArgs.cs index 4bb87827b9..29c54f5de5 100644 --- a/sdk/dotnet/Compute/Inputs/InstanceNetworkInterfaceGetArgs.cs +++ b/sdk/dotnet/Compute/Inputs/InstanceNetworkInterfaceGetArgs.cs @@ -137,7 +137,7 @@ public InputList Ipv6Acc /// /// The project in which the subnetwork belongs. - /// If the `subnetwork` is a self_link, this field is ignored in favor of the project + /// If the `subnetwork` is a self_link, this field is set to the project /// defined in the subnetwork self_link. If the `subnetwork` is a name and this /// field is not provided, the provider project is used. /// diff --git a/sdk/dotnet/Compute/Inputs/NodeTemplateAcceleratorArgs.cs b/sdk/dotnet/Compute/Inputs/NodeTemplateAcceleratorArgs.cs new file mode 100644 index 0000000000..acf1bb5ee8 --- /dev/null +++ b/sdk/dotnet/Compute/Inputs/NodeTemplateAcceleratorArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Compute.Inputs +{ + + public sealed class NodeTemplateAcceleratorArgs : global::Pulumi.ResourceArgs + { + /// + /// The number of the guest accelerator cards exposed to this + /// node template. + /// + [Input("acceleratorCount")] + public Input? AcceleratorCount { get; set; } + + /// + /// Full or partial URL of the accelerator type resource to expose + /// to this node template. + /// + [Input("acceleratorType")] + public Input? AcceleratorType { get; set; } + + public NodeTemplateAcceleratorArgs() + { + } + public static new NodeTemplateAcceleratorArgs Empty => new NodeTemplateAcceleratorArgs(); + } +} diff --git a/sdk/dotnet/Compute/Inputs/NodeTemplateAcceleratorGetArgs.cs b/sdk/dotnet/Compute/Inputs/NodeTemplateAcceleratorGetArgs.cs new file mode 100644 index 0000000000..d553e8efd4 --- /dev/null +++ b/sdk/dotnet/Compute/Inputs/NodeTemplateAcceleratorGetArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Compute.Inputs +{ + + public sealed class NodeTemplateAcceleratorGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The number of the guest accelerator cards exposed to this + /// node template. + /// + [Input("acceleratorCount")] + public Input? AcceleratorCount { get; set; } + + /// + /// Full or partial URL of the accelerator type resource to expose + /// to this node template. + /// + [Input("acceleratorType")] + public Input? AcceleratorType { get; set; } + + public NodeTemplateAcceleratorGetArgs() + { + } + public static new NodeTemplateAcceleratorGetArgs Empty => new NodeTemplateAcceleratorGetArgs(); + } +} diff --git a/sdk/dotnet/Compute/Instance.cs b/sdk/dotnet/Compute/Instance.cs index 483908b6a5..6ceb5eca4f 100644 --- a/sdk/dotnet/Compute/Instance.cs +++ b/sdk/dotnet/Compute/Instance.cs @@ -88,6 +88,77 @@ namespace Pulumi.Gcp.Compute /// }); /// ``` /// + /// ### Confidential Computing + /// + /// Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @default = new Gcp.ServiceAccount.Account("default", new() + /// { + /// AccountId = "my-custom-sa", + /// DisplayName = "Custom SA for VM Instance", + /// }); + /// + /// var confidentialInstance = new Gcp.Compute.Instance("confidential_instance", new() + /// { + /// NetworkInterfaces = new[] + /// { + /// new Gcp.Compute.Inputs.InstanceNetworkInterfaceArgs + /// { + /// AccessConfigs = new[] + /// { + /// null, + /// }, + /// Network = "default", + /// }, + /// }, + /// Name = "my-confidential-instance", + /// Zone = "us-central1-a", + /// MachineType = "n2d-standard-2", + /// MinCpuPlatform = "AMD Milan", + /// ConfidentialInstanceConfig = new Gcp.Compute.Inputs.InstanceConfidentialInstanceConfigArgs + /// { + /// EnableConfidentialCompute = true, + /// ConfidentialInstanceType = "SEV", + /// }, + /// BootDisk = new Gcp.Compute.Inputs.InstanceBootDiskArgs + /// { + /// InitializeParams = new Gcp.Compute.Inputs.InstanceBootDiskInitializeParamsArgs + /// { + /// Image = "ubuntu-os-cloud/ubuntu-2004-lts", + /// Labels = + /// { + /// { "my_label", "value" }, + /// }, + /// }, + /// }, + /// ScratchDisks = new[] + /// { + /// new Gcp.Compute.Inputs.InstanceScratchDiskArgs + /// { + /// Interface = "NVME", + /// }, + /// }, + /// ServiceAccount = new Gcp.Compute.Inputs.InstanceServiceAccountArgs + /// { + /// Email = @default.Email, + /// Scopes = new[] + /// { + /// "cloud-platform", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// /// ## Import /// /// Instances can be imported using any of these accepted formats: @@ -162,7 +233,7 @@ public partial class Instance : global::Pulumi.CustomResource public Output CpuPlatform { get; private set; } = null!; /// - /// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. 
For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + /// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). /// [Output("currentStatus")] public Output CurrentStatus { get; private set; } = null!; @@ -819,7 +890,7 @@ public InputList AttachedDisks public Input? CpuPlatform { get; set; } /// - /// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + /// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). /// [Input("currentStatus")] public Input? CurrentStatus { get; set; } diff --git a/sdk/dotnet/Compute/InstanceTemplate.cs b/sdk/dotnet/Compute/InstanceTemplate.cs index 3e245b66ca..d3a1fb1d5a 100644 --- a/sdk/dotnet/Compute/InstanceTemplate.cs +++ b/sdk/dotnet/Compute/InstanceTemplate.cs @@ -226,6 +226,66 @@ namespace Pulumi.Gcp.Compute /// }); /// ``` /// + /// ### Confidential Computing + /// + /// Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. 
+ /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var @default = new Gcp.ServiceAccount.Account("default", new() + /// { + /// AccountId = "my-custom-sa", + /// DisplayName = "Custom SA for VM Instance", + /// }); + /// + /// var confidentialInstanceTemplate = new Gcp.Compute.InstanceTemplate("confidential_instance_template", new() + /// { + /// NetworkInterfaces = new[] + /// { + /// new Gcp.Compute.Inputs.InstanceTemplateNetworkInterfaceArgs + /// { + /// AccessConfigs = new[] + /// { + /// null, + /// }, + /// Network = "default", + /// }, + /// }, + /// Name = "my-confidential-instance-template", + /// Region = "us-central1", + /// MachineType = "n2d-standard-2", + /// MinCpuPlatform = "AMD Milan", + /// ConfidentialInstanceConfig = new Gcp.Compute.Inputs.InstanceTemplateConfidentialInstanceConfigArgs + /// { + /// EnableConfidentialCompute = true, + /// ConfidentialInstanceType = "SEV", + /// }, + /// Disks = new[] + /// { + /// new Gcp.Compute.Inputs.InstanceTemplateDiskArgs + /// { + /// SourceImage = "ubuntu-os-cloud/ubuntu-2004-lts", + /// }, + /// }, + /// ServiceAccount = new Gcp.Compute.Inputs.InstanceTemplateServiceAccountArgs + /// { + /// Email = @default.Email, + /// Scopes = new[] + /// { + /// "cloud-platform", + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// /// ## Deploying the Latest Image /// /// A common way to use instance templates and managed instance groups is to deploy the diff --git a/sdk/dotnet/Compute/Interconnect.cs b/sdk/dotnet/Compute/Interconnect.cs index f10113c0a8..385ac8f3e8 100644 --- a/sdk/dotnet/Compute/Interconnect.cs +++ b/sdk/dotnet/Compute/Interconnect.cs @@ -281,11 +281,12 @@ public partial class Interconnect : global::Pulumi.CustomResource public Output RemoteLocation { get; private set; } = null!; /// - /// interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + /// interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( /// If specified then the connection is created on MACsec capable hardware ports. If not /// specified, the default value is false, which allocates non-MACsec capable ports first if - /// available). - /// Each value may be one of: `MACSEC`. + /// available). Note that MACSEC is still technically allowed for compatibility reasons, but it + /// does not work with the API, and will be removed in an upcoming major version. + /// Each value may be one of: `MACSEC`, `IF_MACSEC`. /// [Output("requestedFeatures")] public Output> RequestedFeatures { get; private set; } = null!; @@ -485,11 +486,12 @@ public InputMap Labels private InputList? _requestedFeatures; /// - /// interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + /// interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( /// If specified then the connection is created on MACsec capable hardware ports. If not /// specified, the default value is false, which allocates non-MACsec capable ports first if - /// available). - /// Each value may be one of: `MACSEC`. + /// available). Note that MACSEC is still technically allowed for compatibility reasons, but it + /// does not work with the API, and will be removed in an upcoming major version. + /// Each value may be one of: `MACSEC`, `IF_MACSEC`. 
/// public InputList RequestedFeatures { @@ -772,11 +774,12 @@ public InputMap PulumiLabels private InputList? _requestedFeatures; /// - /// interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + /// interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( /// If specified then the connection is created on MACsec capable hardware ports. If not /// specified, the default value is false, which allocates non-MACsec capable ports first if - /// available). - /// Each value may be one of: `MACSEC`. + /// available). Note that MACSEC is still technically allowed for compatibility reasons, but it + /// does not work with the API, and will be removed in an upcoming major version. + /// Each value may be one of: `MACSEC`, `IF_MACSEC`. /// public InputList RequestedFeatures { diff --git a/sdk/dotnet/Compute/NodeTemplate.cs b/sdk/dotnet/Compute/NodeTemplate.cs index eb3fb6b290..88cbdad318 100644 --- a/sdk/dotnet/Compute/NodeTemplate.cs +++ b/sdk/dotnet/Compute/NodeTemplate.cs @@ -73,6 +73,38 @@ namespace Pulumi.Gcp.Compute /// /// }); /// ``` + /// ### Node Template Accelerators + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var central1a = Gcp.Compute.GetNodeTypes.Invoke(new() + /// { + /// Zone = "us-central1-a", + /// }); + /// + /// var template = new Gcp.Compute.NodeTemplate("template", new() + /// { + /// Name = "soletenant-with-accelerators", + /// Region = "us-central1", + /// NodeType = "n1-node-96-624", + /// Accelerators = new[] + /// { + /// new Gcp.Compute.Inputs.NodeTemplateAcceleratorArgs + /// { + /// AcceleratorType = "nvidia-tesla-t4", + /// AcceleratorCount = 4, + /// }, + /// }, + /// }); + /// + /// }); + /// ``` /// /// ## Import /// @@ -107,6 +139,14 @@ namespace Pulumi.Gcp.Compute [GcpResourceType("gcp:compute/nodeTemplate:NodeTemplate")] public partial class NodeTemplate : global::Pulumi.CustomResource { + /// + /// List of the type and count of accelerator cards attached to the + /// node template + /// Structure is documented below. + /// + [Output("accelerators")] + public Output> Accelerators { get; private set; } = null!; + /// /// CPU overcommit. /// Default value is `NONE`. @@ -231,6 +271,20 @@ public static NodeTemplate Get(string name, Input id, NodeTemplateState? public sealed class NodeTemplateArgs : global::Pulumi.ResourceArgs { + [Input("accelerators")] + private InputList? _accelerators; + + /// + /// List of the type and count of accelerator cards attached to the + /// node template + /// Structure is documented below. + /// + public InputList Accelerators + { + get => _accelerators ?? (_accelerators = new InputList()); + set => _accelerators = value; + } + /// /// CPU overcommit. /// Default value is `NONE`. @@ -311,6 +365,20 @@ public NodeTemplateArgs() public sealed class NodeTemplateState : global::Pulumi.ResourceArgs { + [Input("accelerators")] + private InputList? _accelerators; + + /// + /// List of the type and count of accelerator cards attached to the + /// node template + /// Structure is documented below. + /// + public InputList Accelerators + { + get => _accelerators ?? (_accelerators = new InputList()); + set => _accelerators = value; + } + /// /// CPU overcommit. /// Default value is `NONE`. 
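The Interconnect hunk above swaps the documented requested-feature value from `MACSEC` to `IF_MACSEC` (the old value remains accepted for compatibility but, per the updated doc comment, does not work against the API and is slated for removal). The patch carries no usage example for this field, so here is a minimal sketch of how the new value could be passed from this .NET SDK. It is a hedged illustration only: the resource name, customer name, location URL, link type, and link count are placeholder assumptions, not values taken from this patch.

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    // Request MACsec-capable ports via the IF_MACSEC feature value
    // documented in the Interconnect.cs hunk above.
    var macsecInterconnect = new Gcp.Compute.Interconnect("example-interconnect", new()
    {
        Name = "example-interconnect",      // placeholder name
        CustomerName = "example-customer",  // placeholder customer
        InterconnectType = "DEDICATED",
        LinkType = "LINK_TYPE_ETHERNET_10G_LR",
        // Placeholder interconnect location self-link; substitute a real one.
        Location = "https://www.googleapis.com/compute/v1/projects/example-project/global/interconnectLocations/iad-zone1-1",
        RequestedLinkCount = 1,
        RequestedFeatures = new[]
        {
            "IF_MACSEC",
        },
    });
});
```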
diff --git a/sdk/dotnet/Compute/Outputs/FirewallAllow.cs b/sdk/dotnet/Compute/Outputs/FirewallAllow.cs index 4a367f9a20..98c8755d9d 100644 --- a/sdk/dotnet/Compute/Outputs/FirewallAllow.cs +++ b/sdk/dotnet/Compute/Outputs/FirewallAllow.cs @@ -18,7 +18,7 @@ public sealed class FirewallAllow /// is only applicable for UDP or TCP protocol. Each entry must be /// either an integer or a range. If not specified, this rule /// applies to connections through any port. - /// Example inputs include: ["22"], ["80","443"], and + /// Example inputs include: [22], [80, 443], and /// ["12345-12349"]. /// public readonly ImmutableArray Ports; diff --git a/sdk/dotnet/Compute/Outputs/FirewallDeny.cs b/sdk/dotnet/Compute/Outputs/FirewallDeny.cs index 8f780205b3..33897cf81d 100644 --- a/sdk/dotnet/Compute/Outputs/FirewallDeny.cs +++ b/sdk/dotnet/Compute/Outputs/FirewallDeny.cs @@ -18,7 +18,7 @@ public sealed class FirewallDeny /// is only applicable for UDP or TCP protocol. Each entry must be /// either an integer or a range. If not specified, this rule /// applies to connections through any port. - /// Example inputs include: ["22"], ["80","443"], and + /// Example inputs include: [22], [80, 443], and /// ["12345-12349"]. /// public readonly ImmutableArray Ports; diff --git a/sdk/dotnet/Compute/Outputs/GetInstanceBootDiskResult.cs b/sdk/dotnet/Compute/Outputs/GetInstanceBootDiskResult.cs index 74119625de..0b5ca37673 100644 --- a/sdk/dotnet/Compute/Outputs/GetInstanceBootDiskResult.cs +++ b/sdk/dotnet/Compute/Outputs/GetInstanceBootDiskResult.cs @@ -38,6 +38,10 @@ public sealed class GetInstanceBootDiskResult /// public readonly ImmutableArray InitializeParams; /// + /// The disk interface used for attaching this disk. One of `SCSI` or `NVME`. + /// + public readonly string Interface; + /// /// The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. /// public readonly string KmsKeySelfLink; @@ -62,6 +66,8 @@ private GetInstanceBootDiskResult( ImmutableArray initializeParams, + string @interface, + string kmsKeySelfLink, string mode, @@ -73,6 +79,7 @@ private GetInstanceBootDiskResult( DiskEncryptionKeyRaw = diskEncryptionKeyRaw; DiskEncryptionKeySha256 = diskEncryptionKeySha256; InitializeParams = initializeParams; + Interface = @interface; KmsKeySelfLink = kmsKeySelfLink; Mode = mode; Source = source; diff --git a/sdk/dotnet/Compute/Outputs/InstanceBootDisk.cs b/sdk/dotnet/Compute/Outputs/InstanceBootDisk.cs index 6a49a1f9d1..72c9f3adad 100644 --- a/sdk/dotnet/Compute/Outputs/InstanceBootDisk.cs +++ b/sdk/dotnet/Compute/Outputs/InstanceBootDisk.cs @@ -44,6 +44,10 @@ public sealed class InstanceBootDisk /// public readonly Outputs.InstanceBootDiskInitializeParams? InitializeParams; /// + /// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + /// + public readonly string? Interface; + /// /// The self_link of the encryption key that is /// stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link` /// and `disk_encryption_key_raw` may be set. @@ -73,6 +77,8 @@ private InstanceBootDisk( Outputs.InstanceBootDiskInitializeParams? initializeParams, + string? @interface, + string? kmsKeySelfLink, string? 
mode, @@ -84,6 +90,7 @@ private InstanceBootDisk( DiskEncryptionKeyRaw = diskEncryptionKeyRaw; DiskEncryptionKeySha256 = diskEncryptionKeySha256; InitializeParams = initializeParams; + Interface = @interface; KmsKeySelfLink = kmsKeySelfLink; Mode = mode; Source = source; diff --git a/sdk/dotnet/Compute/Outputs/InstanceFromMachineImageBootDisk.cs b/sdk/dotnet/Compute/Outputs/InstanceFromMachineImageBootDisk.cs index 5f2bd3ff0b..04ab73b453 100644 --- a/sdk/dotnet/Compute/Outputs/InstanceFromMachineImageBootDisk.cs +++ b/sdk/dotnet/Compute/Outputs/InstanceFromMachineImageBootDisk.cs @@ -34,6 +34,10 @@ public sealed class InstanceFromMachineImageBootDisk /// public readonly Outputs.InstanceFromMachineImageBootDiskInitializeParams? InitializeParams; /// + /// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + /// + public readonly string? Interface; + /// /// The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. /// public readonly string? KmsKeySelfLink; @@ -58,6 +62,8 @@ private InstanceFromMachineImageBootDisk( Outputs.InstanceFromMachineImageBootDiskInitializeParams? initializeParams, + string? @interface, + string? kmsKeySelfLink, string? mode, @@ -69,6 +75,7 @@ private InstanceFromMachineImageBootDisk( DiskEncryptionKeyRaw = diskEncryptionKeyRaw; DiskEncryptionKeySha256 = diskEncryptionKeySha256; InitializeParams = initializeParams; + Interface = @interface; KmsKeySelfLink = kmsKeySelfLink; Mode = mode; Source = source; diff --git a/sdk/dotnet/Compute/Outputs/InstanceFromTemplateBootDisk.cs b/sdk/dotnet/Compute/Outputs/InstanceFromTemplateBootDisk.cs index b95910147d..aa10ae376f 100644 --- a/sdk/dotnet/Compute/Outputs/InstanceFromTemplateBootDisk.cs +++ b/sdk/dotnet/Compute/Outputs/InstanceFromTemplateBootDisk.cs @@ -34,6 +34,10 @@ public sealed class InstanceFromTemplateBootDisk /// public readonly Outputs.InstanceFromTemplateBootDiskInitializeParams? InitializeParams; /// + /// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + /// + public readonly string? Interface; + /// /// The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. /// public readonly string? KmsKeySelfLink; @@ -58,6 +62,8 @@ private InstanceFromTemplateBootDisk( Outputs.InstanceFromTemplateBootDiskInitializeParams? initializeParams, + string? @interface, + string? kmsKeySelfLink, string? mode, @@ -69,6 +75,7 @@ private InstanceFromTemplateBootDisk( DiskEncryptionKeyRaw = diskEncryptionKeyRaw; DiskEncryptionKeySha256 = diskEncryptionKeySha256; InitializeParams = initializeParams; + Interface = @interface; KmsKeySelfLink = kmsKeySelfLink; Mode = mode; Source = source; diff --git a/sdk/dotnet/Compute/Outputs/InstanceNetworkInterface.cs b/sdk/dotnet/Compute/Outputs/InstanceNetworkInterface.cs index 8823124766..5a0e82f448 100644 --- a/sdk/dotnet/Compute/Outputs/InstanceNetworkInterface.cs +++ b/sdk/dotnet/Compute/Outputs/InstanceNetworkInterface.cs @@ -90,7 +90,7 @@ public sealed class InstanceNetworkInterface public readonly string? Subnetwork; /// /// The project in which the subnetwork belongs. 
- /// If the `subnetwork` is a self_link, this field is ignored in favor of the project + /// If the `subnetwork` is a self_link, this field is set to the project /// defined in the subnetwork self_link. If the `subnetwork` is a name and this /// field is not provided, the provider project is used. /// diff --git a/sdk/dotnet/Compute/Outputs/NodeTemplateAccelerator.cs b/sdk/dotnet/Compute/Outputs/NodeTemplateAccelerator.cs new file mode 100644 index 0000000000..405d1e5635 --- /dev/null +++ b/sdk/dotnet/Compute/Outputs/NodeTemplateAccelerator.cs @@ -0,0 +1,37 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Compute.Outputs +{ + + [OutputType] + public sealed class NodeTemplateAccelerator + { + /// + /// The number of the guest accelerator cards exposed to this + /// node template. + /// + public readonly int? AcceleratorCount; + /// + /// Full or partial URL of the accelerator type resource to expose + /// to this node template. + /// + public readonly string? AcceleratorType; + + [OutputConstructor] + private NodeTemplateAccelerator( + int? acceleratorCount, + + string? acceleratorType) + { + AcceleratorCount = acceleratorCount; + AcceleratorType = acceleratorType; + } + } +} diff --git a/sdk/dotnet/Compute/TargetHttpsProxy.cs b/sdk/dotnet/Compute/TargetHttpsProxy.cs index 1c535290bf..92ad6aa766 100644 --- a/sdk/dotnet/Compute/TargetHttpsProxy.cs +++ b/sdk/dotnet/Compute/TargetHttpsProxy.cs @@ -566,6 +566,10 @@ public partial class TargetHttpsProxy : global::Pulumi.CustomResource /// INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED /// loadBalancingScheme consult ServerTlsPolicy documentation. /// If left blank, communications are not encrypted. + /// If you remove this field from your configuration at the same time as + /// deleting or recreating a referenced ServerTlsPolicy resource, you will + /// receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + /// within the ServerTlsPolicy resource to avoid this. /// [Output("serverTlsPolicy")] public Output ServerTlsPolicy { get; private set; } = null!; @@ -741,6 +745,10 @@ public InputList CertificateManagerCertificates /// INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED /// loadBalancingScheme consult ServerTlsPolicy documentation. /// If left blank, communications are not encrypted. + /// If you remove this field from your configuration at the same time as + /// deleting or recreating a referenced ServerTlsPolicy resource, you will + /// receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + /// within the ServerTlsPolicy resource to avoid this. /// [Input("serverTlsPolicy")] public Input? ServerTlsPolicy { get; set; } @@ -902,6 +910,10 @@ public InputList CertificateManagerCertificates /// INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED /// loadBalancingScheme consult ServerTlsPolicy documentation. /// If left blank, communications are not encrypted. + /// If you remove this field from your configuration at the same time as + /// deleting or recreating a referenced ServerTlsPolicy resource, you will + /// receive a resourceInUseByAnotherResource error. 
Use lifecycle.create_before_destroy + /// within the ServerTlsPolicy resource to avoid this. /// [Input("serverTlsPolicy")] public Input? ServerTlsPolicy { get; set; } diff --git a/sdk/dotnet/Container/AttachedCluster.cs b/sdk/dotnet/Container/AttachedCluster.cs index 8519e5f29b..dc69656f75 100644 --- a/sdk/dotnet/Container/AttachedCluster.cs +++ b/sdk/dotnet/Container/AttachedCluster.cs @@ -254,7 +254,7 @@ public partial class AttachedCluster : global::Pulumi.CustomResource public Output CreateTime { get; private set; } = null!; /// - /// Policy to determine what flags to send on delete. + /// Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS /// [Output("deletionPolicy")] public Output DeletionPolicy { get; private set; } = null!; @@ -459,7 +459,7 @@ public InputMap Annotations public Input? BinaryAuthorization { get; set; } /// - /// Policy to determine what flags to send on delete. + /// Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS /// [Input("deletionPolicy")] public Input? DeletionPolicy { get; set; } @@ -591,7 +591,7 @@ public InputMap Annotations public Input? CreateTime { get; set; } /// - /// Policy to determine what flags to send on delete. + /// Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS /// [Input("deletionPolicy")] public Input? DeletionPolicy { get; set; } diff --git a/sdk/dotnet/Container/Inputs/ClusterNodeConfigKubeletConfigArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodeConfigKubeletConfigArgs.cs index eee6b4f640..f6cb14fae8 100644 --- a/sdk/dotnet/Container/Inputs/ClusterNodeConfigKubeletConfigArgs.cs +++ b/sdk/dotnet/Container/Inputs/ClusterNodeConfigKubeletConfigArgs.cs @@ -41,6 +41,12 @@ public sealed class ClusterNodeConfigKubeletConfigArgs : global::Pulumi.Resource [Input("cpuManagerPolicy", required: true)] public Input CpuManagerPolicy { get; set; } = null!; + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? InsecureKubeletReadonlyPortEnabled { get; set; } + /// /// Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. /// diff --git a/sdk/dotnet/Container/Inputs/ClusterNodeConfigKubeletConfigGetArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodeConfigKubeletConfigGetArgs.cs index 6cf584a610..cef83b1646 100644 --- a/sdk/dotnet/Container/Inputs/ClusterNodeConfigKubeletConfigGetArgs.cs +++ b/sdk/dotnet/Container/Inputs/ClusterNodeConfigKubeletConfigGetArgs.cs @@ -41,6 +41,12 @@ public sealed class ClusterNodeConfigKubeletConfigGetArgs : global::Pulumi.Resou [Input("cpuManagerPolicy", required: true)] public Input CpuManagerPolicy { get; set; } = null!; + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? InsecureKubeletReadonlyPortEnabled { get; set; } + /// /// Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. 
/// diff --git a/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigArgs.cs index 1672362b6a..5e989f928b 100644 --- a/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigArgs.cs +++ b/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigArgs.cs @@ -13,11 +13,18 @@ namespace Pulumi.Gcp.Container.Inputs public sealed class ClusterNodePoolAutoConfigArgs : global::Pulumi.ResourceArgs { /// - /// The network tag config for the cluster's automatically provisioned node pools. + /// The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. /// [Input("networkTags")] public Input? NetworkTags { get; set; } + /// + /// Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + /// Structure is documented below. + /// + [Input("nodeKubeletConfig")] + public Input? NodeKubeletConfig { get; set; } + [Input("resourceManagerTags")] private InputMap? _resourceManagerTags; diff --git a/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigGetArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigGetArgs.cs index 228037bb67..8def989ddf 100644 --- a/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigGetArgs.cs +++ b/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigGetArgs.cs @@ -13,11 +13,18 @@ namespace Pulumi.Gcp.Container.Inputs public sealed class ClusterNodePoolAutoConfigGetArgs : global::Pulumi.ResourceArgs { /// - /// The network tag config for the cluster's automatically provisioned node pools. + /// The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. /// [Input("networkTags")] public Input? NetworkTags { get; set; } + /// + /// Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + /// Structure is documented below. + /// + [Input("nodeKubeletConfig")] + public Input? NodeKubeletConfig { get; set; } + [Input("resourceManagerTags")] private InputMap? _resourceManagerTags; diff --git a/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigNodeKubeletConfigArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigNodeKubeletConfigArgs.cs new file mode 100644 index 0000000000..a11238073e --- /dev/null +++ b/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigNodeKubeletConfigArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Container.Inputs +{ + + public sealed class ClusterNodePoolAutoConfigNodeKubeletConfigArgs : global::Pulumi.ResourceArgs + { + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? 
InsecureKubeletReadonlyPortEnabled { get; set; } + + public ClusterNodePoolAutoConfigNodeKubeletConfigArgs() + { + } + public static new ClusterNodePoolAutoConfigNodeKubeletConfigArgs Empty => new ClusterNodePoolAutoConfigNodeKubeletConfigArgs(); + } +} diff --git a/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigNodeKubeletConfigGetArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigNodeKubeletConfigGetArgs.cs new file mode 100644 index 0000000000..f4ac2ca2f2 --- /dev/null +++ b/sdk/dotnet/Container/Inputs/ClusterNodePoolAutoConfigNodeKubeletConfigGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Container.Inputs +{ + + public sealed class ClusterNodePoolAutoConfigNodeKubeletConfigGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? InsecureKubeletReadonlyPortEnabled { get; set; } + + public ClusterNodePoolAutoConfigNodeKubeletConfigGetArgs() + { + } + public static new ClusterNodePoolAutoConfigNodeKubeletConfigGetArgs Empty => new ClusterNodePoolAutoConfigNodeKubeletConfigGetArgs(); + } +} diff --git a/sdk/dotnet/Container/Inputs/ClusterNodePoolDefaultsNodeConfigDefaultsArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodePoolDefaultsNodeConfigDefaultsArgs.cs index 62500408d5..c7fb1a0ded 100644 --- a/sdk/dotnet/Container/Inputs/ClusterNodePoolDefaultsNodeConfigDefaultsArgs.cs +++ b/sdk/dotnet/Container/Inputs/ClusterNodePoolDefaultsNodeConfigDefaultsArgs.cs @@ -24,6 +24,12 @@ public sealed class ClusterNodePoolDefaultsNodeConfigDefaultsArgs : global::Pulu [Input("gcfsConfig")] public Input? GcfsConfig { get; set; } + /// + /// Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? InsecureKubeletReadonlyPortEnabled { get; set; } + /// /// The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. /// diff --git a/sdk/dotnet/Container/Inputs/ClusterNodePoolDefaultsNodeConfigDefaultsGetArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodePoolDefaultsNodeConfigDefaultsGetArgs.cs index 410cdf06f7..02744c2c92 100644 --- a/sdk/dotnet/Container/Inputs/ClusterNodePoolDefaultsNodeConfigDefaultsGetArgs.cs +++ b/sdk/dotnet/Container/Inputs/ClusterNodePoolDefaultsNodeConfigDefaultsGetArgs.cs @@ -24,6 +24,12 @@ public sealed class ClusterNodePoolDefaultsNodeConfigDefaultsGetArgs : global::P [Input("gcfsConfig")] public Input? GcfsConfig { get; set; } + /// + /// Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? 
InsecureKubeletReadonlyPortEnabled { get; set; } + /// /// The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. /// diff --git a/sdk/dotnet/Container/Inputs/ClusterNodePoolNodeConfigKubeletConfigArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodePoolNodeConfigKubeletConfigArgs.cs index 0ef906790a..3e9897505a 100644 --- a/sdk/dotnet/Container/Inputs/ClusterNodePoolNodeConfigKubeletConfigArgs.cs +++ b/sdk/dotnet/Container/Inputs/ClusterNodePoolNodeConfigKubeletConfigArgs.cs @@ -41,6 +41,12 @@ public sealed class ClusterNodePoolNodeConfigKubeletConfigArgs : global::Pulumi. [Input("cpuManagerPolicy", required: true)] public Input CpuManagerPolicy { get; set; } = null!; + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? InsecureKubeletReadonlyPortEnabled { get; set; } + /// /// Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. /// diff --git a/sdk/dotnet/Container/Inputs/ClusterNodePoolNodeConfigKubeletConfigGetArgs.cs b/sdk/dotnet/Container/Inputs/ClusterNodePoolNodeConfigKubeletConfigGetArgs.cs index 54b6f7fdba..7fc7a3b9cf 100644 --- a/sdk/dotnet/Container/Inputs/ClusterNodePoolNodeConfigKubeletConfigGetArgs.cs +++ b/sdk/dotnet/Container/Inputs/ClusterNodePoolNodeConfigKubeletConfigGetArgs.cs @@ -41,6 +41,12 @@ public sealed class ClusterNodePoolNodeConfigKubeletConfigGetArgs : global::Pulu [Input("cpuManagerPolicy", required: true)] public Input CpuManagerPolicy { get; set; } = null!; + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? InsecureKubeletReadonlyPortEnabled { get; set; } + /// /// Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. /// diff --git a/sdk/dotnet/Container/Inputs/NodePoolNodeConfigKubeletConfigArgs.cs b/sdk/dotnet/Container/Inputs/NodePoolNodeConfigKubeletConfigArgs.cs index d1f3d8d0ea..59f2f8c10e 100644 --- a/sdk/dotnet/Container/Inputs/NodePoolNodeConfigKubeletConfigArgs.cs +++ b/sdk/dotnet/Container/Inputs/NodePoolNodeConfigKubeletConfigArgs.cs @@ -30,6 +30,12 @@ public sealed class NodePoolNodeConfigKubeletConfigArgs : global::Pulumi.Resourc [Input("cpuManagerPolicy", required: true)] public Input CpuManagerPolicy { get; set; } = null!; + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? InsecureKubeletReadonlyPortEnabled { get; set; } + /// /// Controls the maximum number of processes allowed to run in a pod. 
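The same setting is now exposed at two cluster-level scopes added above: `nodePoolAutoConfig.nodeKubeletConfig` for Autopilot clusters and `nodePoolDefaults.nodeConfigDefaults` as the default for newly created node pools. A hedged sketch follows; the cluster names are hypothetical, and the `EnableAutopilot` and `NodePoolDefaults` properties are assumed to exist unchanged from earlier SDK versions.

```csharp
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    // Hypothetical Autopilot cluster: nodeKubeletConfig applies the kubelet
    // setting to automatically provisioned node pools.
    var autopilot = new Gcp.Container.Cluster("autopilot-cluster", new()
    {
        Name = "autopilot-cluster",
        Location = "us-central1",
        EnableAutopilot = true,
        NodePoolAutoConfig = new Gcp.Container.Inputs.ClusterNodePoolAutoConfigArgs
        {
            NodeKubeletConfig = new Gcp.Container.Inputs.ClusterNodePoolAutoConfigNodeKubeletConfigArgs
            {
                InsecureKubeletReadonlyPortEnabled = "FALSE",
            },
        },
    });

    // Hypothetical Standard cluster: nodePoolDefaults makes the same flag the
    // default for node pools created later in this cluster.
    var standard = new Gcp.Container.Cluster("standard-cluster", new()
    {
        Name = "standard-cluster",
        Location = "us-central1",
        InitialNodeCount = 1,
        NodePoolDefaults = new Gcp.Container.Inputs.ClusterNodePoolDefaultsArgs
        {
            NodeConfigDefaults = new Gcp.Container.Inputs.ClusterNodePoolDefaultsNodeConfigDefaultsArgs
            {
                InsecureKubeletReadonlyPortEnabled = "FALSE",
            },
        },
    });
});
```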
/// diff --git a/sdk/dotnet/Container/Inputs/NodePoolNodeConfigKubeletConfigGetArgs.cs b/sdk/dotnet/Container/Inputs/NodePoolNodeConfigKubeletConfigGetArgs.cs index 231cb3146f..27f5c6b581 100644 --- a/sdk/dotnet/Container/Inputs/NodePoolNodeConfigKubeletConfigGetArgs.cs +++ b/sdk/dotnet/Container/Inputs/NodePoolNodeConfigKubeletConfigGetArgs.cs @@ -30,6 +30,12 @@ public sealed class NodePoolNodeConfigKubeletConfigGetArgs : global::Pulumi.Reso [Input("cpuManagerPolicy", required: true)] public Input CpuManagerPolicy { get; set; } = null!; + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + [Input("insecureKubeletReadonlyPortEnabled")] + public Input? InsecureKubeletReadonlyPortEnabled { get; set; } + /// /// Controls the maximum number of processes allowed to run in a pod. /// diff --git a/sdk/dotnet/Container/Outputs/ClusterNodeConfigKubeletConfig.cs b/sdk/dotnet/Container/Outputs/ClusterNodeConfigKubeletConfig.cs index c558db2b9f..aa4b07aab8 100644 --- a/sdk/dotnet/Container/Outputs/ClusterNodeConfigKubeletConfig.cs +++ b/sdk/dotnet/Container/Outputs/ClusterNodeConfigKubeletConfig.cs @@ -37,6 +37,10 @@ public sealed class ClusterNodeConfigKubeletConfig /// public readonly string CpuManagerPolicy; /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + public readonly string? InsecureKubeletReadonlyPortEnabled; + /// /// Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. /// public readonly int? PodPidsLimit; @@ -49,11 +53,14 @@ private ClusterNodeConfigKubeletConfig( string cpuManagerPolicy, + string? insecureKubeletReadonlyPortEnabled, + int? podPidsLimit) { CpuCfsQuota = cpuCfsQuota; CpuCfsQuotaPeriod = cpuCfsQuotaPeriod; CpuManagerPolicy = cpuManagerPolicy; + InsecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; PodPidsLimit = podPidsLimit; } } diff --git a/sdk/dotnet/Container/Outputs/ClusterNodePoolAutoConfig.cs b/sdk/dotnet/Container/Outputs/ClusterNodePoolAutoConfig.cs index 759476e60d..57f44e1a83 100644 --- a/sdk/dotnet/Container/Outputs/ClusterNodePoolAutoConfig.cs +++ b/sdk/dotnet/Container/Outputs/ClusterNodePoolAutoConfig.cs @@ -14,10 +14,15 @@ namespace Pulumi.Gcp.Container.Outputs public sealed class ClusterNodePoolAutoConfig { /// - /// The network tag config for the cluster's automatically provisioned node pools. + /// The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. /// public readonly Outputs.ClusterNodePoolAutoConfigNetworkTags? NetworkTags; /// + /// Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + /// Structure is documented below. + /// + public readonly Outputs.ClusterNodePoolAutoConfigNodeKubeletConfig? NodeKubeletConfig; + /// /// A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. 
`{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. /// public readonly ImmutableDictionary? ResourceManagerTags; @@ -26,9 +31,12 @@ public sealed class ClusterNodePoolAutoConfig private ClusterNodePoolAutoConfig( Outputs.ClusterNodePoolAutoConfigNetworkTags? networkTags, + Outputs.ClusterNodePoolAutoConfigNodeKubeletConfig? nodeKubeletConfig, + ImmutableDictionary? resourceManagerTags) { NetworkTags = networkTags; + NodeKubeletConfig = nodeKubeletConfig; ResourceManagerTags = resourceManagerTags; } } diff --git a/sdk/dotnet/Container/Outputs/ClusterNodePoolAutoConfigNodeKubeletConfig.cs b/sdk/dotnet/Container/Outputs/ClusterNodePoolAutoConfigNodeKubeletConfig.cs new file mode 100644 index 0000000000..be3214974a --- /dev/null +++ b/sdk/dotnet/Container/Outputs/ClusterNodePoolAutoConfigNodeKubeletConfig.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Container.Outputs +{ + + [OutputType] + public sealed class ClusterNodePoolAutoConfigNodeKubeletConfig + { + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + public readonly string? InsecureKubeletReadonlyPortEnabled; + + [OutputConstructor] + private ClusterNodePoolAutoConfigNodeKubeletConfig(string? insecureKubeletReadonlyPortEnabled) + { + InsecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + } + } +} diff --git a/sdk/dotnet/Container/Outputs/ClusterNodePoolDefaultsNodeConfigDefaults.cs b/sdk/dotnet/Container/Outputs/ClusterNodePoolDefaultsNodeConfigDefaults.cs index e2285e3aeb..d3e72152c6 100644 --- a/sdk/dotnet/Container/Outputs/ClusterNodePoolDefaultsNodeConfigDefaults.cs +++ b/sdk/dotnet/Container/Outputs/ClusterNodePoolDefaultsNodeConfigDefaults.cs @@ -22,6 +22,10 @@ public sealed class ClusterNodePoolDefaultsNodeConfigDefaults /// public readonly Outputs.ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig? GcfsConfig; /// + /// Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + public readonly string? InsecureKubeletReadonlyPortEnabled; + /// /// The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. /// public readonly string? LoggingVariant; @@ -32,10 +36,13 @@ private ClusterNodePoolDefaultsNodeConfigDefaults( Outputs.ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig? gcfsConfig, + string? insecureKubeletReadonlyPortEnabled, + string? 
loggingVariant) { ContainerdConfig = containerdConfig; GcfsConfig = gcfsConfig; + InsecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; LoggingVariant = loggingVariant; } } diff --git a/sdk/dotnet/Container/Outputs/ClusterNodePoolNodeConfigKubeletConfig.cs b/sdk/dotnet/Container/Outputs/ClusterNodePoolNodeConfigKubeletConfig.cs index 4ccad48c30..86cfcfe06e 100644 --- a/sdk/dotnet/Container/Outputs/ClusterNodePoolNodeConfigKubeletConfig.cs +++ b/sdk/dotnet/Container/Outputs/ClusterNodePoolNodeConfigKubeletConfig.cs @@ -37,6 +37,10 @@ public sealed class ClusterNodePoolNodeConfigKubeletConfig /// public readonly string CpuManagerPolicy; /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + public readonly string? InsecureKubeletReadonlyPortEnabled; + /// /// Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. /// public readonly int? PodPidsLimit; @@ -49,11 +53,14 @@ private ClusterNodePoolNodeConfigKubeletConfig( string cpuManagerPolicy, + string? insecureKubeletReadonlyPortEnabled, + int? podPidsLimit) { CpuCfsQuota = cpuCfsQuota; CpuCfsQuotaPeriod = cpuCfsQuotaPeriod; CpuManagerPolicy = cpuManagerPolicy; + InsecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; PodPidsLimit = podPidsLimit; } } diff --git a/sdk/dotnet/Container/Outputs/GetClusterNodeConfigKubeletConfigResult.cs b/sdk/dotnet/Container/Outputs/GetClusterNodeConfigKubeletConfigResult.cs index 931543d52d..204cd4c66e 100644 --- a/sdk/dotnet/Container/Outputs/GetClusterNodeConfigKubeletConfigResult.cs +++ b/sdk/dotnet/Container/Outputs/GetClusterNodeConfigKubeletConfigResult.cs @@ -26,6 +26,10 @@ public sealed class GetClusterNodeConfigKubeletConfigResult /// public readonly string CpuManagerPolicy; /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + public readonly string InsecureKubeletReadonlyPortEnabled; + /// /// Controls the maximum number of processes allowed to run in a pod. /// public readonly int PodPidsLimit; @@ -38,11 +42,14 @@ private GetClusterNodeConfigKubeletConfigResult( string cpuManagerPolicy, + string insecureKubeletReadonlyPortEnabled, + int podPidsLimit) { CpuCfsQuota = cpuCfsQuota; CpuCfsQuotaPeriod = cpuCfsQuotaPeriod; CpuManagerPolicy = cpuManagerPolicy; + InsecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; PodPidsLimit = podPidsLimit; } } diff --git a/sdk/dotnet/Container/Outputs/GetClusterNodePoolAutoConfigNodeKubeletConfigResult.cs b/sdk/dotnet/Container/Outputs/GetClusterNodePoolAutoConfigNodeKubeletConfigResult.cs new file mode 100644 index 0000000000..9f5f7b6c7b --- /dev/null +++ b/sdk/dotnet/Container/Outputs/GetClusterNodePoolAutoConfigNodeKubeletConfigResult.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Container.Outputs +{ + + [OutputType] + public sealed class GetClusterNodePoolAutoConfigNodeKubeletConfigResult + { + /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. 
Possible values: `TRUE`, `FALSE`. + /// + public readonly string InsecureKubeletReadonlyPortEnabled; + + [OutputConstructor] + private GetClusterNodePoolAutoConfigNodeKubeletConfigResult(string insecureKubeletReadonlyPortEnabled) + { + InsecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + } + } +} diff --git a/sdk/dotnet/Container/Outputs/GetClusterNodePoolAutoConfigResult.cs b/sdk/dotnet/Container/Outputs/GetClusterNodePoolAutoConfigResult.cs index 74fa92dc90..2e4210a6c8 100644 --- a/sdk/dotnet/Container/Outputs/GetClusterNodePoolAutoConfigResult.cs +++ b/sdk/dotnet/Container/Outputs/GetClusterNodePoolAutoConfigResult.cs @@ -18,6 +18,10 @@ public sealed class GetClusterNodePoolAutoConfigResult /// public readonly ImmutableArray NetworkTags; /// + /// Node kubelet configs. + /// + public readonly ImmutableArray NodeKubeletConfigs; + /// /// A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. /// public readonly ImmutableDictionary ResourceManagerTags; @@ -26,9 +30,12 @@ public sealed class GetClusterNodePoolAutoConfigResult private GetClusterNodePoolAutoConfigResult( ImmutableArray networkTags, + ImmutableArray nodeKubeletConfigs, + ImmutableDictionary resourceManagerTags) { NetworkTags = networkTags; + NodeKubeletConfigs = nodeKubeletConfigs; ResourceManagerTags = resourceManagerTags; } } diff --git a/sdk/dotnet/Container/Outputs/GetClusterNodePoolDefaultNodeConfigDefaultResult.cs b/sdk/dotnet/Container/Outputs/GetClusterNodePoolDefaultNodeConfigDefaultResult.cs index f8be8d15dc..7721a07c26 100644 --- a/sdk/dotnet/Container/Outputs/GetClusterNodePoolDefaultNodeConfigDefaultResult.cs +++ b/sdk/dotnet/Container/Outputs/GetClusterNodePoolDefaultNodeConfigDefaultResult.cs @@ -22,6 +22,10 @@ public sealed class GetClusterNodePoolDefaultNodeConfigDefaultResult /// public readonly ImmutableArray GcfsConfigs; /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + public readonly string InsecureKubeletReadonlyPortEnabled; + /// /// Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. /// public readonly string LoggingVariant; @@ -32,10 +36,13 @@ private GetClusterNodePoolDefaultNodeConfigDefaultResult( ImmutableArray gcfsConfigs, + string insecureKubeletReadonlyPortEnabled, + string loggingVariant) { ContainerdConfigs = containerdConfigs; GcfsConfigs = gcfsConfigs; + InsecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; LoggingVariant = loggingVariant; } } diff --git a/sdk/dotnet/Container/Outputs/GetClusterNodePoolNodeConfigKubeletConfigResult.cs b/sdk/dotnet/Container/Outputs/GetClusterNodePoolNodeConfigKubeletConfigResult.cs index 3532bee0d3..d1f7e99f1a 100644 --- a/sdk/dotnet/Container/Outputs/GetClusterNodePoolNodeConfigKubeletConfigResult.cs +++ b/sdk/dotnet/Container/Outputs/GetClusterNodePoolNodeConfigKubeletConfigResult.cs @@ -26,6 +26,10 @@ public sealed class GetClusterNodePoolNodeConfigKubeletConfigResult /// public readonly string CpuManagerPolicy; /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
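The new output types above also surface the setting through the cluster data source. The sketch below is an assumption-heavy illustration: it presumes `GetClusterResult` exposes the auto-config block as a top-level `NodePoolAutoConfigs` list (consistent with the generated result types in this patch), and the cluster name and location are hypothetical.

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    // Hypothetical look-up of an existing cluster; the new output reports the
    // effective kubelet read-only port setting for auto-provisioned node pools.
    var cluster = Gcp.Container.GetCluster.Invoke(new()
    {
        Name = "autopilot-cluster",
        Location = "us-central1",
    });

    var readonlyPort = cluster.Apply(c =>
        c.NodePoolAutoConfigs
            .SelectMany(a => a.NodeKubeletConfigs)
            .Select(k => k.InsecureKubeletReadonlyPortEnabled)
            .FirstOrDefault() ?? "unset");

    return new Dictionary<string, object?>
    {
        ["insecureKubeletReadonlyPortEnabled"] = readonlyPort,
    };
});
```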
+ /// + public readonly string InsecureKubeletReadonlyPortEnabled; + /// /// Controls the maximum number of processes allowed to run in a pod. /// public readonly int PodPidsLimit; @@ -38,11 +42,14 @@ private GetClusterNodePoolNodeConfigKubeletConfigResult( string cpuManagerPolicy, + string insecureKubeletReadonlyPortEnabled, + int podPidsLimit) { CpuCfsQuota = cpuCfsQuota; CpuCfsQuotaPeriod = cpuCfsQuotaPeriod; CpuManagerPolicy = cpuManagerPolicy; + InsecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; PodPidsLimit = podPidsLimit; } } diff --git a/sdk/dotnet/Container/Outputs/NodePoolNodeConfigKubeletConfig.cs b/sdk/dotnet/Container/Outputs/NodePoolNodeConfigKubeletConfig.cs index 0b64655b0a..4d96dd39b4 100644 --- a/sdk/dotnet/Container/Outputs/NodePoolNodeConfigKubeletConfig.cs +++ b/sdk/dotnet/Container/Outputs/NodePoolNodeConfigKubeletConfig.cs @@ -26,6 +26,10 @@ public sealed class NodePoolNodeConfigKubeletConfig /// public readonly string CpuManagerPolicy; /// + /// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + /// + public readonly string? InsecureKubeletReadonlyPortEnabled; + /// /// Controls the maximum number of processes allowed to run in a pod. /// public readonly int? PodPidsLimit; @@ -38,11 +42,14 @@ private NodePoolNodeConfigKubeletConfig( string cpuManagerPolicy, + string? insecureKubeletReadonlyPortEnabled, + int? podPidsLimit) { CpuCfsQuota = cpuCfsQuota; CpuCfsQuotaPeriod = cpuCfsQuotaPeriod; CpuManagerPolicy = cpuManagerPolicy; + InsecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; PodPidsLimit = podPidsLimit; } } diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionArgs.cs index 285fc720cf..d4ef0f5ecd 100644 --- a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionArgs.cs +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionArgs.cs @@ -26,6 +26,13 @@ public sealed class PreventionDiscoveryConfigActionArgs : global::Pulumi.Resourc [Input("pubSubNotification")] public Input? PubSubNotification { get; set; } + /// + /// Publish a message into the Pub/Sub topic. + /// Structure is documented below. + /// + [Input("tagResources")] + public Input? TagResources { get; set; } + public PreventionDiscoveryConfigActionArgs() { } diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionGetArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionGetArgs.cs index d88be9e318..e03d20f688 100644 --- a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionGetArgs.cs +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionGetArgs.cs @@ -26,6 +26,13 @@ public sealed class PreventionDiscoveryConfigActionGetArgs : global::Pulumi.Reso [Input("pubSubNotification")] public Input? PubSubNotification { get; set; } + /// + /// Publish a message into the Pub/Sub topic. + /// Structure is documented below. + /// + [Input("tagResources")] + public Input? 
TagResources { get; set; } + public PreventionDiscoveryConfigActionGetArgs() { } diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesArgs.cs new file mode 100644 index 0000000000..6f0eaaf928 --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesArgs.cs @@ -0,0 +1,52 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigActionTagResourcesArgs : global::Pulumi.ResourceArgs + { + /// + /// Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + /// + [Input("lowerDataRiskToLow")] + public Input? LowerDataRiskToLow { get; set; } + + [Input("profileGenerationsToTags")] + private InputList? _profileGenerationsToTags; + + /// + /// The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + /// Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + /// + public InputList ProfileGenerationsToTags + { + get => _profileGenerationsToTags ?? (_profileGenerationsToTags = new InputList()); + set => _profileGenerationsToTags = value; + } + + [Input("tagConditions")] + private InputList? _tagConditions; + + /// + /// The tags to associate with different conditions. + /// Structure is documented below. + /// + public InputList TagConditions + { + get => _tagConditions ?? (_tagConditions = new InputList()); + set => _tagConditions = value; + } + + public PreventionDiscoveryConfigActionTagResourcesArgs() + { + } + public static new PreventionDiscoveryConfigActionTagResourcesArgs Empty => new PreventionDiscoveryConfigActionTagResourcesArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesGetArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesGetArgs.cs new file mode 100644 index 0000000000..e6bd0f99c0 --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesGetArgs.cs @@ -0,0 +1,52 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigActionTagResourcesGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + /// + [Input("lowerDataRiskToLow")] + public Input? LowerDataRiskToLow { get; set; } + + [Input("profileGenerationsToTags")] + private InputList? _profileGenerationsToTags; + + /// + /// The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + /// Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + /// + public InputList ProfileGenerationsToTags + { + get => _profileGenerationsToTags ?? (_profileGenerationsToTags = new InputList()); + set => _profileGenerationsToTags = value; + } + + [Input("tagConditions")] + private InputList? _tagConditions; + + /// + /// The tags to associate with different conditions. + /// Structure is documented below. + /// + public InputList TagConditions + { + get => _tagConditions ?? (_tagConditions = new InputList()); + set => _tagConditions = value; + } + + public PreventionDiscoveryConfigActionTagResourcesGetArgs() + { + } + public static new PreventionDiscoveryConfigActionTagResourcesGetArgs Empty => new PreventionDiscoveryConfigActionTagResourcesGetArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionArgs.cs new file mode 100644 index 0000000000..4782fcd85a --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigActionTagResourcesTagConditionArgs : global::Pulumi.ResourceArgs + { + /// + /// Conditions attaching the tag to a resource on its profile having this sensitivity score. + /// Structure is documented below. + /// + [Input("sensitivityScore")] + public Input? SensitivityScore { get; set; } + + /// + /// The tag value to attach to resources. + /// Structure is documented below. + /// + [Input("tag")] + public Input? 
Tag { get; set; } + + public PreventionDiscoveryConfigActionTagResourcesTagConditionArgs() + { + } + public static new PreventionDiscoveryConfigActionTagResourcesTagConditionArgs Empty => new PreventionDiscoveryConfigActionTagResourcesTagConditionArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionGetArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionGetArgs.cs new file mode 100644 index 0000000000..10846adfac --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionGetArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigActionTagResourcesTagConditionGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Conditions attaching the tag to a resource on its profile having this sensitivity score. + /// Structure is documented below. + /// + [Input("sensitivityScore")] + public Input? SensitivityScore { get; set; } + + /// + /// The tag value to attach to resources. + /// Structure is documented below. + /// + [Input("tag")] + public Input? Tag { get; set; } + + public PreventionDiscoveryConfigActionTagResourcesTagConditionGetArgs() + { + } + public static new PreventionDiscoveryConfigActionTagResourcesTagConditionGetArgs Empty => new PreventionDiscoveryConfigActionTagResourcesTagConditionGetArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs.cs new file mode 100644 index 0000000000..e18b5955aa --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs : global::Pulumi.ResourceArgs + { + /// + /// The sensitivity score applied to the resource. + /// Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. 
+ /// + [Input("score", required: true)] + public Input Score { get; set; } = null!; + + public PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs() + { + } + public static new PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs Empty => new PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreGetArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreGetArgs.cs new file mode 100644 index 0000000000..aecb3b14b4 --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreGetArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The sensitivity score applied to the resource. + /// Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + /// + [Input("score", required: true)] + public Input Score { get; set; } = null!; + + public PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreGetArgs() + { + } + public static new PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreGetArgs Empty => new PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreGetArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs.cs new file mode 100644 index 0000000000..eb7413cdb7 --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs : global::Pulumi.ResourceArgs + { + /// + /// The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + /// + [Input("namespacedValue")] + public Input? 
NamespacedValue { get; set; } + + public PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs() + { + } + public static new PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs Empty => new PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagGetArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagGetArgs.cs new file mode 100644 index 0000000000..c4e892f1cb --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagGetArgs.cs @@ -0,0 +1,26 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigActionTagResourcesTagConditionTagGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + /// + [Input("namespacedValue")] + public Input? NamespacedValue { get; set; } + + public PreventionDiscoveryConfigActionTagResourcesTagConditionTagGetArgs() + { + } + public static new PreventionDiscoveryConfigActionTagResourcesTagConditionTagGetArgs Empty => new PreventionDiscoveryConfigActionTagResourcesTagConditionTagGetArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs.cs index f17072f794..b2a73cb140 100644 --- a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs.cs +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs.cs @@ -12,6 +12,13 @@ namespace Pulumi.Gcp.DataLoss.Inputs public sealed class PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs : global::Pulumi.ResourceArgs { + /// + /// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + /// Structure is documented below. + /// + [Input("inspectTemplateModifiedCadence")] + public Input? InspectTemplateModifiedCadence { get; set; } + /// /// Governs when to update data profiles when a schema is modified /// Structure is documented below. diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceGetArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceGetArgs.cs index 37f7dcc6ef..a460576ad3 100644 --- a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceGetArgs.cs +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceGetArgs.cs @@ -12,6 +12,13 @@ namespace Pulumi.Gcp.DataLoss.Inputs public sealed class PreventionDiscoveryConfigTargetBigQueryTargetCadenceGetArgs : global::Pulumi.ResourceArgs { + /// + /// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + /// Structure is documented below. 
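The `tagResources` action and its nested condition types added above can be wired together as in the following sketch. This is not taken from the patch: the parent, inspect template, and tag values are hypothetical, the `Parent`, `Location`, `Status`, and `InspectTemplates` properties on `PreventionDiscoveryConfig` are assumed from the existing resource surface, and targets are omitted here (a BigQuery target appears in the cadence sketch further below).

```csharp
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    // Hypothetical discovery config: profiles with a high sensitivity score get
    // the given tag attached, and tagging lowers the data risk of the resource.
    var config = new Gcp.DataLoss.PreventionDiscoveryConfig("tagging-config", new()
    {
        Parent = "projects/my-project/locations/us-central1", // hypothetical project
        Location = "us-central1",
        Status = "RUNNING",
        InspectTemplates = new[]
        {
            "projects/my-project/locations/us-central1/inspectTemplates/my-template", // hypothetical
        },
        // A real config also needs at least one target; see the cadence sketch below.
        Actions = new[]
        {
            new Gcp.DataLoss.Inputs.PreventionDiscoveryConfigActionArgs
            {
                TagResources = new Gcp.DataLoss.Inputs.PreventionDiscoveryConfigActionTagResourcesArgs
                {
                    LowerDataRiskToLow = true,
                    ProfileGenerationsToTags = new[]
                    {
                        "PROFILE_GENERATION_NEW",
                        "PROFILE_GENERATION_UPDATE",
                    },
                    TagConditions = new[]
                    {
                        new Gcp.DataLoss.Inputs.PreventionDiscoveryConfigActionTagResourcesTagConditionArgs
                        {
                            SensitivityScore = new Gcp.DataLoss.Inputs.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs
                            {
                                Score = "SENSITIVITY_HIGH",
                            },
                            Tag = new Gcp.DataLoss.Inputs.PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs
                            {
                                NamespacedValue = "123456/environment/prod", // format per the doc comment above
                            },
                        },
                    },
                },
            },
        },
    });
});
```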
+ /// + [Input("inspectTemplateModifiedCadence")] + public Input? InspectTemplateModifiedCadence { get; set; } + /// /// Governs when to update data profiles when a schema is modified /// Structure is documented below. diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs.cs new file mode 100644 index 0000000000..3a306f0ff6 --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs : global::Pulumi.ResourceArgs + { + /// + /// How frequently data profiles can be updated when the template is modified. Defaults to never. + /// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + /// + [Input("frequency")] + public Input? Frequency { get; set; } + + public PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs() + { + } + public static new PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs Empty => new PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceGetArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceGetArgs.cs new file mode 100644 index 0000000000..fa3db9ca78 --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceGetArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceGetArgs : global::Pulumi.ResourceArgs + { + /// + /// How frequently data profiles can be updated when the template is modified. Defaults to never. + /// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + /// + [Input("frequency")] + public Input? 
Frequency { get; set; } + + public PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceGetArgs() + { + } + public static new PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceGetArgs Empty => new PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceGetArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs.cs index 2970a7dd4b..7cbaac5ee5 100644 --- a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs.cs +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs.cs @@ -12,6 +12,13 @@ namespace Pulumi.Gcp.DataLoss.Inputs public sealed class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs : global::Pulumi.ResourceArgs { + /// + /// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + /// Structure is documented below. + /// + [Input("inspectTemplateModifiedCadence")] + public Input? InspectTemplateModifiedCadence { get; set; } + /// /// Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. /// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceGetArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceGetArgs.cs index 09981677d4..eedf9dd6df 100644 --- a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceGetArgs.cs +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceGetArgs.cs @@ -12,6 +12,13 @@ namespace Pulumi.Gcp.DataLoss.Inputs public sealed class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceGetArgs : global::Pulumi.ResourceArgs { + /// + /// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + /// Structure is documented below. + /// + [Input("inspectTemplateModifiedCadence")] + public Input? InspectTemplateModifiedCadence { get; set; } + /// /// Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. /// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs.cs new file mode 100644 index 0000000000..4415ea0c07 --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs : global::Pulumi.ResourceArgs + { + /// + /// How frequently data profiles can be updated when the template is modified. Defaults to never. + /// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + /// + [Input("frequency", required: true)] + public Input Frequency { get; set; } = null!; + + public PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs() + { + } + public static new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs Empty => new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceGetArgs.cs b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceGetArgs.cs new file mode 100644 index 0000000000..346eb31852 --- /dev/null +++ b/sdk/dotnet/DataLoss/Inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceGetArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Inputs +{ + + public sealed class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceGetArgs : global::Pulumi.ResourceArgs + { + /// + /// How frequently data profiles can be updated when the template is modified. Defaults to never. + /// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + /// + [Input("frequency", required: true)] + public Input Frequency { get; set; } = null!; + + public PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceGetArgs() + { + } + public static new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceGetArgs Empty => new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceGetArgs(); + } +} diff --git a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigAction.cs b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigAction.cs index e57091282d..714056198f 100644 --- a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigAction.cs +++ b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigAction.cs @@ -23,15 +23,23 @@ public sealed class PreventionDiscoveryConfigAction /// Structure is documented below. /// public readonly Outputs.PreventionDiscoveryConfigActionPubSubNotification? PubSubNotification; + /// + /// Publish a message into the Pub/Sub topic. + /// Structure is documented below. + /// + public readonly Outputs.PreventionDiscoveryConfigActionTagResources? 
TagResources; [OutputConstructor] private PreventionDiscoveryConfigAction( Outputs.PreventionDiscoveryConfigActionExportData? exportData, - Outputs.PreventionDiscoveryConfigActionPubSubNotification? pubSubNotification) + Outputs.PreventionDiscoveryConfigActionPubSubNotification? pubSubNotification, + + Outputs.PreventionDiscoveryConfigActionTagResources? tagResources) { ExportData = exportData; PubSubNotification = pubSubNotification; + TagResources = tagResources; } } } diff --git a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResources.cs b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResources.cs new file mode 100644 index 0000000000..9d35c9e993 --- /dev/null +++ b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResources.cs @@ -0,0 +1,44 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Outputs +{ + + [OutputType] + public sealed class PreventionDiscoveryConfigActionTagResources + { + /// + /// Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + /// + public readonly bool? LowerDataRiskToLow; + /// + /// The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + /// Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + /// + public readonly ImmutableArray ProfileGenerationsToTags; + /// + /// The tags to associate with different conditions. + /// Structure is documented below. + /// + public readonly ImmutableArray TagConditions; + + [OutputConstructor] + private PreventionDiscoveryConfigActionTagResources( + bool? lowerDataRiskToLow, + + ImmutableArray profileGenerationsToTags, + + ImmutableArray tagConditions) + { + LowerDataRiskToLow = lowerDataRiskToLow; + ProfileGenerationsToTags = profileGenerationsToTags; + TagConditions = tagConditions; + } + } +} diff --git a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagCondition.cs b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagCondition.cs new file mode 100644 index 0000000000..24b50c5388 --- /dev/null +++ b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagCondition.cs @@ -0,0 +1,37 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Outputs +{ + + [OutputType] + public sealed class PreventionDiscoveryConfigActionTagResourcesTagCondition + { + /// + /// Conditions attaching the tag to a resource on its profile having this sensitivity score. + /// Structure is documented below. + /// + public readonly Outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore? SensitivityScore; + /// + /// The tag value to attach to resources. + /// Structure is documented below. + /// + public readonly Outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionTag? Tag; + + [OutputConstructor] + private PreventionDiscoveryConfigActionTagResourcesTagCondition( + Outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore? sensitivityScore, + + Outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionTag? tag) + { + SensitivityScore = sensitivityScore; + Tag = tag; + } + } +} diff --git a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore.cs b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore.cs new file mode 100644 index 0000000000..0769fc2e0a --- /dev/null +++ b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Outputs +{ + + [OutputType] + public sealed class PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore + { + /// + /// The sensitivity score applied to the resource. + /// Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + /// + public readonly string Score; + + [OutputConstructor] + private PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore(string score) + { + Score = score; + } + } +} diff --git a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTag.cs b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTag.cs new file mode 100644 index 0000000000..7b492bd6cf --- /dev/null +++ b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTag.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Outputs +{ + + [OutputType] + public sealed class PreventionDiscoveryConfigActionTagResourcesTagConditionTag + { + /// + /// The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + /// + public readonly string? NamespacedValue; + + [OutputConstructor] + private PreventionDiscoveryConfigActionTagResourcesTagConditionTag(string? 
namespacedValue) + { + NamespacedValue = namespacedValue; + } + } +} diff --git a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadence.cs b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadence.cs index f4089ca875..e3affccd8b 100644 --- a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadence.cs +++ b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadence.cs @@ -13,6 +13,11 @@ namespace Pulumi.Gcp.DataLoss.Outputs [OutputType] public sealed class PreventionDiscoveryConfigTargetBigQueryTargetCadence { + /// + /// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + /// Structure is documented below. + /// + public readonly Outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence? InspectTemplateModifiedCadence; /// /// Governs when to update data profiles when a schema is modified /// Structure is documented below. @@ -26,10 +31,13 @@ public sealed class PreventionDiscoveryConfigTargetBigQueryTargetCadence [OutputConstructor] private PreventionDiscoveryConfigTargetBigQueryTargetCadence( + Outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence? inspectTemplateModifiedCadence, + Outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence? schemaModifiedCadence, Outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadence? tableModifiedCadence) { + InspectTemplateModifiedCadence = inspectTemplateModifiedCadence; SchemaModifiedCadence = schemaModifiedCadence; TableModifiedCadence = tableModifiedCadence; } diff --git a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence.cs b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence.cs new file mode 100644 index 0000000000..01b37a76db --- /dev/null +++ b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Outputs +{ + + [OutputType] + public sealed class PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence + { + /// + /// How frequently data profiles can be updated when the template is modified. Defaults to never. + /// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + /// + public readonly string? Frequency; + + [OutputConstructor] + private PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence(string? 
frequency) + { + Frequency = frequency; + } + } +} diff --git a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence.cs b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence.cs index 2809a35d7a..f3979d23f6 100644 --- a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence.cs +++ b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence.cs @@ -13,6 +13,11 @@ namespace Pulumi.Gcp.DataLoss.Outputs [OutputType] public sealed class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence { + /// + /// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + /// Structure is documented below. + /// + public readonly Outputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence? InspectTemplateModifiedCadence; /// /// Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. /// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. @@ -26,10 +31,13 @@ public sealed class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCaden [OutputConstructor] private PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence( + Outputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence? inspectTemplateModifiedCadence, + string? refreshFrequency, Outputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence? schemaModifiedCadence) { + InspectTemplateModifiedCadence = inspectTemplateModifiedCadence; RefreshFrequency = refreshFrequency; SchemaModifiedCadence = schemaModifiedCadence; } diff --git a/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence.cs b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence.cs new file mode 100644 index 0000000000..1092319f40 --- /dev/null +++ b/sdk/dotnet/DataLoss/Outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DataLoss.Outputs +{ + + [OutputType] + public sealed class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence + { + /// + /// How frequently data profiles can be updated when the template is modified. Defaults to never. + /// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
+ /// + public readonly string Frequency; + + [OutputConstructor] + private PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence(string frequency) + { + Frequency = frequency; + } + } +} diff --git a/sdk/dotnet/DatabaseMigrationService/ConnectionProfile.cs b/sdk/dotnet/DatabaseMigrationService/ConnectionProfile.cs index 5a4746974e..8a8cc8d66b 100644 --- a/sdk/dotnet/DatabaseMigrationService/ConnectionProfile.cs +++ b/sdk/dotnet/DatabaseMigrationService/ConnectionProfile.cs @@ -341,6 +341,189 @@ namespace Pulumi.Gcp.DatabaseMigrationService /// /// }); /// ``` + /// ### Database Migration Service Connection Profile Existing Mysql + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var project = Gcp.Organizations.GetProject.Invoke(); + /// + /// var destinationCsql = new Gcp.Sql.DatabaseInstance("destination_csql", new() + /// { + /// Name = "destination-csql", + /// DatabaseVersion = "MYSQL_5_7", + /// Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs + /// { + /// Tier = "db-n1-standard-1", + /// DeletionProtectionEnabled = false, + /// }, + /// DeletionProtection = false, + /// }); + /// + /// var existing_mysql = new Gcp.DatabaseMigrationService.ConnectionProfile("existing-mysql", new() + /// { + /// Location = "us-central1", + /// ConnectionProfileId = "destination-cp", + /// DisplayName = "destination-cp_display", + /// Labels = + /// { + /// { "foo", "bar" }, + /// }, + /// Mysql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfileMysqlArgs + /// { + /// CloudSqlId = "destination-csql", + /// }, + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// destinationCsql, + /// }, + /// }); + /// + /// }); + /// ``` + /// ### Database Migration Service Connection Profile Existing Postgres + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var project = Gcp.Organizations.GetProject.Invoke(); + /// + /// var destinationCsql = new Gcp.Sql.DatabaseInstance("destination_csql", new() + /// { + /// Name = "destination-csql", + /// DatabaseVersion = "POSTGRES_15", + /// Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs + /// { + /// Tier = "db-custom-2-13312", + /// DeletionProtectionEnabled = false, + /// }, + /// DeletionProtection = false, + /// }); + /// + /// var existing_psql = new Gcp.DatabaseMigrationService.ConnectionProfile("existing-psql", new() + /// { + /// Location = "us-central1", + /// ConnectionProfileId = "destination-cp", + /// DisplayName = "destination-cp_display", + /// Labels = + /// { + /// { "foo", "bar" }, + /// }, + /// Postgresql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfilePostgresqlArgs + /// { + /// CloudSqlId = "destination-csql", + /// }, + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// destinationCsql, + /// }, + /// }); + /// + /// }); + /// ``` + /// ### Database Migration Service Connection Profile Existing Alloydb + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var project = Gcp.Organizations.GetProject.Invoke(); + /// + /// var @default = new Gcp.Compute.Network("default", new() + /// { + 
/// Name = "destination-alloydb", + /// }); + /// + /// var destinationAlloydb = new Gcp.Alloydb.Cluster("destination_alloydb", new() + /// { + /// ClusterId = "destination-alloydb", + /// Location = "us-central1", + /// NetworkConfig = new Gcp.Alloydb.Inputs.ClusterNetworkConfigArgs + /// { + /// Network = @default.Id, + /// }, + /// DatabaseVersion = "POSTGRES_15", + /// InitialUser = new Gcp.Alloydb.Inputs.ClusterInitialUserArgs + /// { + /// User = "destination-alloydb", + /// Password = "destination-alloydb", + /// }, + /// }); + /// + /// var privateIpAlloc = new Gcp.Compute.GlobalAddress("private_ip_alloc", new() + /// { + /// Name = "destination-alloydb", + /// AddressType = "INTERNAL", + /// Purpose = "VPC_PEERING", + /// PrefixLength = 16, + /// Network = @default.Id, + /// }); + /// + /// var vpcConnection = new Gcp.ServiceNetworking.Connection("vpc_connection", new() + /// { + /// Network = @default.Id, + /// Service = "servicenetworking.googleapis.com", + /// ReservedPeeringRanges = new[] + /// { + /// privateIpAlloc.Name, + /// }, + /// }); + /// + /// var destinationAlloydbPrimary = new Gcp.Alloydb.Instance("destination_alloydb_primary", new() + /// { + /// Cluster = destinationAlloydb.Name, + /// InstanceId = "destination-alloydb-primary", + /// InstanceType = "PRIMARY", + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// vpcConnection, + /// }, + /// }); + /// + /// var existing_alloydb = new Gcp.DatabaseMigrationService.ConnectionProfile("existing-alloydb", new() + /// { + /// Location = "us-central1", + /// ConnectionProfileId = "destination-cp", + /// DisplayName = "destination-cp_display", + /// Labels = + /// { + /// { "foo", "bar" }, + /// }, + /// Postgresql = new Gcp.DatabaseMigrationService.Inputs.ConnectionProfilePostgresqlArgs + /// { + /// AlloydbClusterId = "destination-alloydb", + /// }, + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// destinationAlloydb, + /// destinationAlloydbPrimary, + /// }, + /// }); + /// + /// }); + /// ``` /// /// ## Import /// diff --git a/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfileMysqlArgs.cs b/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfileMysqlArgs.cs index d14214ba12..9c8728ba29 100644 --- a/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfileMysqlArgs.cs +++ b/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfileMysqlArgs.cs @@ -19,16 +19,16 @@ public sealed class ConnectionProfileMysqlArgs : global::Pulumi.ResourceArgs public Input? CloudSqlId { get; set; } /// - /// Required. The IP or hostname of the source MySQL database. + /// The IP or hostname of the source MySQL database. /// - [Input("host", required: true)] - public Input Host { get; set; } = null!; + [Input("host")] + public Input? Host { get; set; } - [Input("password", required: true)] + [Input("password")] private Input? _password; /// - /// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + /// Input only. The password for the user that Database Migration Service will be using to connect to the database. /// This field is not returned on request, and the value is encrypted when stored in Database Migration Service. /// **Note**: This property is sensitive and will not be displayed in the plan. /// @@ -50,10 +50,10 @@ public Input? Password public Input? PasswordSet { get; set; } /// - /// Required. The network port of the source MySQL database. + /// The network port of the source MySQL database. 
/// - [Input("port", required: true)] - public Input Port { get; set; } = null!; + [Input("port")] + public Input? Port { get; set; } /// /// SSL configuration for the destination to connect to the source database. @@ -63,10 +63,10 @@ public Input? Password public Input? Ssl { get; set; } /// - /// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + /// The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. /// - [Input("username", required: true)] - public Input Username { get; set; } = null!; + [Input("username")] + public Input? Username { get; set; } public ConnectionProfileMysqlArgs() { diff --git a/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfileMysqlGetArgs.cs b/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfileMysqlGetArgs.cs index b6a8b85010..eb2b44d302 100644 --- a/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfileMysqlGetArgs.cs +++ b/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfileMysqlGetArgs.cs @@ -19,16 +19,16 @@ public sealed class ConnectionProfileMysqlGetArgs : global::Pulumi.ResourceArgs public Input? CloudSqlId { get; set; } /// - /// Required. The IP or hostname of the source MySQL database. + /// The IP or hostname of the source MySQL database. /// - [Input("host", required: true)] - public Input Host { get; set; } = null!; + [Input("host")] + public Input? Host { get; set; } - [Input("password", required: true)] + [Input("password")] private Input? _password; /// - /// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + /// Input only. The password for the user that Database Migration Service will be using to connect to the database. /// This field is not returned on request, and the value is encrypted when stored in Database Migration Service. /// **Note**: This property is sensitive and will not be displayed in the plan. /// @@ -50,10 +50,10 @@ public Input? Password public Input? PasswordSet { get; set; } /// - /// Required. The network port of the source MySQL database. + /// The network port of the source MySQL database. /// - [Input("port", required: true)] - public Input Port { get; set; } = null!; + [Input("port")] + public Input? Port { get; set; } /// /// SSL configuration for the destination to connect to the source database. @@ -63,10 +63,10 @@ public Input? Password public Input? Ssl { get; set; } /// - /// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + /// The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. /// - [Input("username", required: true)] - public Input Username { get; set; } = null!; + [Input("username")] + public Input? 
Username { get; set; } public ConnectionProfileMysqlGetArgs() { diff --git a/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfilePostgresqlArgs.cs b/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfilePostgresqlArgs.cs index a4be6de1fb..c56878091b 100644 --- a/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfilePostgresqlArgs.cs +++ b/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfilePostgresqlArgs.cs @@ -12,6 +12,12 @@ namespace Pulumi.Gcp.DatabaseMigrationService.Inputs public sealed class ConnectionProfilePostgresqlArgs : global::Pulumi.ResourceArgs { + /// + /// If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + /// + [Input("alloydbClusterId")] + public Input? AlloydbClusterId { get; set; } + /// /// If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. /// @@ -19,10 +25,10 @@ public sealed class ConnectionProfilePostgresqlArgs : global::Pulumi.ResourceArg public Input? CloudSqlId { get; set; } /// - /// Required. The IP or hostname of the source MySQL database. + /// The IP or hostname of the source MySQL database. /// - [Input("host", required: true)] - public Input Host { get; set; } = null!; + [Input("host")] + public Input? Host { get; set; } /// /// (Output) @@ -31,11 +37,11 @@ public sealed class ConnectionProfilePostgresqlArgs : global::Pulumi.ResourceArg [Input("networkArchitecture")] public Input? NetworkArchitecture { get; set; } - [Input("password", required: true)] + [Input("password")] private Input? _password; /// - /// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + /// Input only. The password for the user that Database Migration Service will be using to connect to the database. /// This field is not returned on request, and the value is encrypted when stored in Database Migration Service. /// **Note**: This property is sensitive and will not be displayed in the plan. /// @@ -57,10 +63,10 @@ public Input? Password public Input? PasswordSet { get; set; } /// - /// Required. The network port of the source MySQL database. + /// The network port of the source MySQL database. /// - [Input("port", required: true)] - public Input Port { get; set; } = null!; + [Input("port")] + public Input? Port { get; set; } /// /// SSL configuration for the destination to connect to the source database. @@ -70,10 +76,10 @@ public Input? Password public Input? Ssl { get; set; } /// - /// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + /// The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. /// - [Input("username", required: true)] - public Input Username { get; set; } = null!; + [Input("username")] + public Input? 
Username { get; set; } public ConnectionProfilePostgresqlArgs() { diff --git a/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfilePostgresqlGetArgs.cs b/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfilePostgresqlGetArgs.cs index 352f9fe242..5e192d83ae 100644 --- a/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfilePostgresqlGetArgs.cs +++ b/sdk/dotnet/DatabaseMigrationService/Inputs/ConnectionProfilePostgresqlGetArgs.cs @@ -12,6 +12,12 @@ namespace Pulumi.Gcp.DatabaseMigrationService.Inputs public sealed class ConnectionProfilePostgresqlGetArgs : global::Pulumi.ResourceArgs { + /// + /// If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + /// + [Input("alloydbClusterId")] + public Input? AlloydbClusterId { get; set; } + /// /// If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. /// @@ -19,10 +25,10 @@ public sealed class ConnectionProfilePostgresqlGetArgs : global::Pulumi.Resource public Input? CloudSqlId { get; set; } /// - /// Required. The IP or hostname of the source MySQL database. + /// The IP or hostname of the source MySQL database. /// - [Input("host", required: true)] - public Input Host { get; set; } = null!; + [Input("host")] + public Input? Host { get; set; } /// /// (Output) @@ -31,11 +37,11 @@ public sealed class ConnectionProfilePostgresqlGetArgs : global::Pulumi.Resource [Input("networkArchitecture")] public Input? NetworkArchitecture { get; set; } - [Input("password", required: true)] + [Input("password")] private Input? _password; /// - /// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + /// Input only. The password for the user that Database Migration Service will be using to connect to the database. /// This field is not returned on request, and the value is encrypted when stored in Database Migration Service. /// **Note**: This property is sensitive and will not be displayed in the plan. /// @@ -57,10 +63,10 @@ public Input? Password public Input? PasswordSet { get; set; } /// - /// Required. The network port of the source MySQL database. + /// The network port of the source MySQL database. /// - [Input("port", required: true)] - public Input Port { get; set; } = null!; + [Input("port")] + public Input? Port { get; set; } /// /// SSL configuration for the destination to connect to the source database. @@ -70,10 +76,10 @@ public Input? Password public Input? Ssl { get; set; } /// - /// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + /// The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. /// - [Input("username", required: true)] - public Input Username { get; set; } = null!; + [Input("username")] + public Input? Username { get; set; } public ConnectionProfilePostgresqlGetArgs() { diff --git a/sdk/dotnet/DatabaseMigrationService/Outputs/ConnectionProfileMysql.cs b/sdk/dotnet/DatabaseMigrationService/Outputs/ConnectionProfileMysql.cs index 16832f60d1..eae3e2c0ae 100644 --- a/sdk/dotnet/DatabaseMigrationService/Outputs/ConnectionProfileMysql.cs +++ b/sdk/dotnet/DatabaseMigrationService/Outputs/ConnectionProfileMysql.cs @@ -18,49 +18,49 @@ public sealed class ConnectionProfileMysql /// public readonly string? CloudSqlId; /// - /// Required. 
The IP or hostname of the source MySQL database. + /// The IP or hostname of the source MySQL database. /// - public readonly string Host; + public readonly string? Host; /// - /// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + /// Input only. The password for the user that Database Migration Service will be using to connect to the database. /// This field is not returned on request, and the value is encrypted when stored in Database Migration Service. /// **Note**: This property is sensitive and will not be displayed in the plan. /// - public readonly string Password; + public readonly string? Password; /// /// (Output) /// Output only. Indicates If this connection profile password is stored. /// public readonly bool? PasswordSet; /// - /// Required. The network port of the source MySQL database. + /// The network port of the source MySQL database. /// - public readonly int Port; + public readonly int? Port; /// /// SSL configuration for the destination to connect to the source database. /// Structure is documented below. /// public readonly Outputs.ConnectionProfileMysqlSsl? Ssl; /// - /// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + /// The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. /// - public readonly string Username; + public readonly string? Username; [OutputConstructor] private ConnectionProfileMysql( string? cloudSqlId, - string host, + string? host, - string password, + string? password, bool? passwordSet, - int port, + int? port, Outputs.ConnectionProfileMysqlSsl? ssl, - string username) + string? username) { CloudSqlId = cloudSqlId; Host = host; diff --git a/sdk/dotnet/DatabaseMigrationService/Outputs/ConnectionProfilePostgresql.cs b/sdk/dotnet/DatabaseMigrationService/Outputs/ConnectionProfilePostgresql.cs index c47852aba9..1effd9e7cc 100644 --- a/sdk/dotnet/DatabaseMigrationService/Outputs/ConnectionProfilePostgresql.cs +++ b/sdk/dotnet/DatabaseMigrationService/Outputs/ConnectionProfilePostgresql.cs @@ -13,62 +13,69 @@ namespace Pulumi.Gcp.DatabaseMigrationService.Outputs [OutputType] public sealed class ConnectionProfilePostgresql { + /// + /// If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + /// + public readonly string? AlloydbClusterId; /// /// If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. /// public readonly string? CloudSqlId; /// - /// Required. The IP or hostname of the source MySQL database. + /// The IP or hostname of the source MySQL database. /// - public readonly string Host; + public readonly string? Host; /// /// (Output) /// Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. /// public readonly string? NetworkArchitecture; /// - /// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + /// Input only. The password for the user that Database Migration Service will be using to connect to the database. /// This field is not returned on request, and the value is encrypted when stored in Database Migration Service. /// **Note**: This property is sensitive and will not be displayed in the plan. 
/// - public readonly string Password; + public readonly string? Password; /// /// (Output) /// Output only. Indicates If this connection profile password is stored. /// public readonly bool? PasswordSet; /// - /// Required. The network port of the source MySQL database. + /// The network port of the source MySQL database. /// - public readonly int Port; + public readonly int? Port; /// /// SSL configuration for the destination to connect to the source database. /// Structure is documented below. /// public readonly Outputs.ConnectionProfilePostgresqlSsl? Ssl; /// - /// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + /// The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. /// - public readonly string Username; + public readonly string? Username; [OutputConstructor] private ConnectionProfilePostgresql( + string? alloydbClusterId, + string? cloudSqlId, - string host, + string? host, string? networkArchitecture, - string password, + string? password, bool? passwordSet, - int port, + int? port, Outputs.ConnectionProfilePostgresqlSsl? ssl, - string username) + string? username) { + AlloydbClusterId = alloydbClusterId; CloudSqlId = cloudSqlId; Host = host; NetworkArchitecture = networkArchitecture; diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigArgs.cs index 20e8c53f87..9f7d162862 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigArgs.cs @@ -91,7 +91,7 @@ public InputList? SoftwareConfig { get; set; } /// - /// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + /// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). /// [Input("stagingBucket")] public Input? StagingBucket { get; set; } diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.cs index c8c63b6ac5..1d772326d7 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.cs @@ -22,7 +22,7 @@ public sealed class WorkflowTemplatePlacementManagedClusterConfigGceClusterConfi private InputMap? 
_metadata; /// - /// The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + /// The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). /// public InputMap Metadata { @@ -88,7 +88,7 @@ public InputList ServiceAccountScopes private InputList? _tags; /// - /// The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + /// The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). /// public InputList Tags { diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigGetArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigGetArgs.cs index 76ba2cdea8..7ae15b9a0c 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigGetArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigGetArgs.cs @@ -22,7 +22,7 @@ public sealed class WorkflowTemplatePlacementManagedClusterConfigGceClusterConfi private InputMap? _metadata; /// - /// The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + /// The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). /// public InputMap Metadata { @@ -88,7 +88,7 @@ public InputList ServiceAccountScopes private InputList? _tags; /// - /// The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + /// The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). /// public InputList Tags { diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGetArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGetArgs.cs index ef7d0533ac..3c539d3fda 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGetArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigGetArgs.cs @@ -91,7 +91,7 @@ public InputList? SoftwareConfig { get; set; } /// - /// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + /// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
/// [Input("stagingBucket")] public Input? StagingBucket { get; set; } diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.cs index 2c5a0dec67..8ac5681aef 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.cs @@ -19,7 +19,7 @@ public sealed class WorkflowTemplatePlacementManagedClusterConfigInitializationA public Input? ExecutableFile { get; set; } /// - /// Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + /// Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. /// [Input("executionTimeout")] public Input? ExecutionTimeout { get; set; } diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionGetArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionGetArgs.cs index 63b8967033..c57ed4bcfd 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionGetArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionGetArgs.cs @@ -19,7 +19,7 @@ public sealed class WorkflowTemplatePlacementManagedClusterConfigInitializationA public Input? ExecutableFile { get; set; } /// - /// Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + /// Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. /// [Input("executionTimeout")] public Input? 
ExecutionTimeout { get; set; } diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.cs index fc553cde06..ba1ac06fd4 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.cs @@ -13,25 +13,25 @@ namespace Pulumi.Gcp.Dataproc.Inputs public sealed class WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs : global::Pulumi.ResourceArgs { /// - /// The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + /// The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). /// [Input("autoDeleteTime")] public Input? AutoDeleteTime { get; set; } /// - /// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + /// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). /// [Input("autoDeleteTtl")] public Input? AutoDeleteTtl { get; set; } /// - /// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + /// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). /// [Input("idleDeleteTtl")] public Input? IdleDeleteTtl { get; set; } /// - /// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + /// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). /// [Input("idleStartTime")] public Input? 
IdleStartTime { get; set; } diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigGetArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigGetArgs.cs index c1eed72e01..a15f415666 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigGetArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigGetArgs.cs @@ -13,25 +13,25 @@ namespace Pulumi.Gcp.Dataproc.Inputs public sealed class WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigGetArgs : global::Pulumi.ResourceArgs { /// - /// The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + /// The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). /// [Input("autoDeleteTime")] public Input? AutoDeleteTime { get; set; } /// - /// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + /// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). /// [Input("autoDeleteTtl")] public Input? AutoDeleteTtl { get; set; } /// - /// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + /// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). /// [Input("idleDeleteTtl")] public Input? IdleDeleteTtl { get; set; } /// - /// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + /// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). /// [Input("idleStartTime")] public Input? IdleStartTime { get; set; } diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.cs index 6ff4f40270..40bc6ca878 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.cs @@ -73,7 +73,7 @@ public InputList - /// Specifies the minimum cpu platform for the Instance Group. 
See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + /// Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). /// [Input("minCpuPlatform")] public Input? MinCpuPlatform { get; set; } diff --git a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigGetArgs.cs b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigGetArgs.cs index 7d2d22c893..f78743e1c4 100644 --- a/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigGetArgs.cs +++ b/sdk/dotnet/Dataproc/Inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigGetArgs.cs @@ -73,7 +73,7 @@ public InputList - /// Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + /// Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). /// [Input("minCpuPlatform")] public Input? MinCpuPlatform { get; set; } diff --git a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfig.cs b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfig.cs index 50b673c419..3b2f9570b2 100644 --- a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfig.cs +++ b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfig.cs @@ -62,7 +62,7 @@ public sealed class WorkflowTemplatePlacementManagedClusterConfig /// public readonly Outputs.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig? SoftwareConfig; /// - /// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + /// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). /// public readonly string? StagingBucket; /// diff --git a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig.cs b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig.cs index 6a0ff60439..8bf29aca5d 100644 --- a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig.cs +++ b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig.cs @@ -18,7 +18,7 @@ public sealed class WorkflowTemplatePlacementManagedClusterConfigGceClusterConfi /// public readonly bool? InternalIpOnly; /// - /// The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). 
+ /// The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). /// public readonly ImmutableDictionary? Metadata; /// @@ -54,7 +54,7 @@ public sealed class WorkflowTemplatePlacementManagedClusterConfigGceClusterConfi /// public readonly string? Subnetwork; /// - /// The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + /// The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). /// public readonly ImmutableArray Tags; /// diff --git a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigInitializationAction.cs b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigInitializationAction.cs index 30a8b224f1..dfd816e4ce 100644 --- a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigInitializationAction.cs +++ b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigInitializationAction.cs @@ -18,7 +18,7 @@ public sealed class WorkflowTemplatePlacementManagedClusterConfigInitializationA /// public readonly string? ExecutableFile; /// - /// Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + /// Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. /// public readonly string? ExecutionTimeout; diff --git a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig.cs b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig.cs index e09f15974a..ba353a398f 100644 --- a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig.cs +++ b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig.cs @@ -14,19 +14,19 @@ namespace Pulumi.Gcp.Dataproc.Outputs public sealed class WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { /// - /// The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + /// The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). /// public readonly string? AutoDeleteTime; /// - /// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + /// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. 
Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). /// public readonly string? AutoDeleteTtl; /// - /// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + /// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). /// public readonly string? IdleDeleteTtl; /// - /// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + /// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). /// public readonly string? IdleStartTime; diff --git a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfig.cs b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfig.cs index 0990dbae2a..c4ab39fea8 100644 --- a/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfig.cs +++ b/sdk/dotnet/Dataproc/Outputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfig.cs @@ -42,7 +42,7 @@ public sealed class WorkflowTemplatePlacementManagedClusterConfigMasterConfig /// public readonly ImmutableArray ManagedGroupConfigs; /// - /// Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + /// Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). /// public readonly string? MinCpuPlatform; /// diff --git a/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigArgs.cs b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigArgs.cs index 399b893111..816ba28432 100644 --- a/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigArgs.cs +++ b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigArgs.cs @@ -12,6 +12,12 @@ namespace Pulumi.Gcp.Datastream.Inputs public sealed class StreamSourceConfigSqlServerSourceConfigArgs : global::Pulumi.ResourceArgs { + /// + /// CDC reader reads from change tables. + /// + [Input("changeTables")] + public Input? ChangeTables { get; set; } + /// /// SQL Server objects to exclude from the stream. /// Structure is documented below. @@ -38,6 +44,12 @@ public sealed class StreamSourceConfigSqlServerSourceConfigArgs : global::Pulumi [Input("maxConcurrentCdcTasks")] public Input? MaxConcurrentCdcTasks { get; set; } + /// + /// CDC reader reads from transaction logs. + /// + [Input("transactionLogs")] + public Input? 
TransactionLogs { get; set; } + public StreamSourceConfigSqlServerSourceConfigArgs() { } diff --git a/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.cs b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.cs new file mode 100644 index 0000000000..d4913eba14 --- /dev/null +++ b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Datastream.Inputs +{ + + public sealed class StreamSourceConfigSqlServerSourceConfigChangeTablesArgs : global::Pulumi.ResourceArgs + { + public StreamSourceConfigSqlServerSourceConfigChangeTablesArgs() + { + } + public static new StreamSourceConfigSqlServerSourceConfigChangeTablesArgs Empty => new StreamSourceConfigSqlServerSourceConfigChangeTablesArgs(); + } +} diff --git a/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesGetArgs.cs b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesGetArgs.cs new file mode 100644 index 0000000000..2b8e00351f --- /dev/null +++ b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesGetArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Datastream.Inputs +{ + + public sealed class StreamSourceConfigSqlServerSourceConfigChangeTablesGetArgs : global::Pulumi.ResourceArgs + { + public StreamSourceConfigSqlServerSourceConfigChangeTablesGetArgs() + { + } + public static new StreamSourceConfigSqlServerSourceConfigChangeTablesGetArgs Empty => new StreamSourceConfigSqlServerSourceConfigChangeTablesGetArgs(); + } +} diff --git a/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigGetArgs.cs b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigGetArgs.cs index 803a0cef37..f9e80e1434 100644 --- a/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigGetArgs.cs +++ b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigGetArgs.cs @@ -12,6 +12,12 @@ namespace Pulumi.Gcp.Datastream.Inputs public sealed class StreamSourceConfigSqlServerSourceConfigGetArgs : global::Pulumi.ResourceArgs { + /// + /// CDC reader reads from change tables. + /// + [Input("changeTables")] + public Input? ChangeTables { get; set; } + /// /// SQL Server objects to exclude from the stream. /// Structure is documented below. @@ -38,6 +44,12 @@ public sealed class StreamSourceConfigSqlServerSourceConfigGetArgs : global::Pul [Input("maxConcurrentCdcTasks")] public Input? MaxConcurrentCdcTasks { get; set; } + /// + /// CDC reader reads from transaction logs. + /// + [Input("transactionLogs")] + public Input? 
TransactionLogs { get; set; } + public StreamSourceConfigSqlServerSourceConfigGetArgs() { } diff --git a/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs.cs b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs.cs new file mode 100644 index 0000000000..136da027ea --- /dev/null +++ b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Datastream.Inputs +{ + + public sealed class StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs : global::Pulumi.ResourceArgs + { + public StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs() + { + } + public static new StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs Empty => new StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs(); + } +} diff --git a/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsGetArgs.cs b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsGetArgs.cs new file mode 100644 index 0000000000..0fafaae2f5 --- /dev/null +++ b/sdk/dotnet/Datastream/Inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsGetArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Datastream.Inputs +{ + + public sealed class StreamSourceConfigSqlServerSourceConfigTransactionLogsGetArgs : global::Pulumi.ResourceArgs + { + public StreamSourceConfigSqlServerSourceConfigTransactionLogsGetArgs() + { + } + public static new StreamSourceConfigSqlServerSourceConfigTransactionLogsGetArgs Empty => new StreamSourceConfigSqlServerSourceConfigTransactionLogsGetArgs(); + } +} diff --git a/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfig.cs b/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfig.cs index 698c4e0d77..cebf6a779c 100644 --- a/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfig.cs +++ b/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfig.cs @@ -13,6 +13,10 @@ namespace Pulumi.Gcp.Datastream.Outputs [OutputType] public sealed class StreamSourceConfigSqlServerSourceConfig { + /// + /// CDC reader reads from change tables. + /// + public readonly Outputs.StreamSourceConfigSqlServerSourceConfigChangeTables? ChangeTables; /// /// SQL Server objects to exclude from the stream. /// Structure is documented below. @@ -31,21 +35,31 @@ public sealed class StreamSourceConfigSqlServerSourceConfig /// Max concurrent CDC tasks. /// public readonly int? MaxConcurrentCdcTasks; + /// + /// CDC reader reads from transaction logs. + /// + public readonly Outputs.StreamSourceConfigSqlServerSourceConfigTransactionLogs? TransactionLogs; [OutputConstructor] private StreamSourceConfigSqlServerSourceConfig( + Outputs.StreamSourceConfigSqlServerSourceConfigChangeTables? 
changeTables, + Outputs.StreamSourceConfigSqlServerSourceConfigExcludeObjects? excludeObjects, Outputs.StreamSourceConfigSqlServerSourceConfigIncludeObjects? includeObjects, int? maxConcurrentBackfillTasks, - int? maxConcurrentCdcTasks) + int? maxConcurrentCdcTasks, + + Outputs.StreamSourceConfigSqlServerSourceConfigTransactionLogs? transactionLogs) { + ChangeTables = changeTables; ExcludeObjects = excludeObjects; IncludeObjects = includeObjects; MaxConcurrentBackfillTasks = maxConcurrentBackfillTasks; MaxConcurrentCdcTasks = maxConcurrentCdcTasks; + TransactionLogs = transactionLogs; } } } diff --git a/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfigChangeTables.cs b/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfigChangeTables.cs new file mode 100644 index 0000000000..15c2cae821 --- /dev/null +++ b/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfigChangeTables.cs @@ -0,0 +1,21 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Datastream.Outputs +{ + + [OutputType] + public sealed class StreamSourceConfigSqlServerSourceConfigChangeTables + { + [OutputConstructor] + private StreamSourceConfigSqlServerSourceConfigChangeTables() + { + } + } +} diff --git a/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfigTransactionLogs.cs b/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfigTransactionLogs.cs new file mode 100644 index 0000000000..96f42a7b1d --- /dev/null +++ b/sdk/dotnet/Datastream/Outputs/StreamSourceConfigSqlServerSourceConfigTransactionLogs.cs @@ -0,0 +1,21 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Datastream.Outputs +{ + + [OutputType] + public sealed class StreamSourceConfigSqlServerSourceConfigTransactionLogs + { + [OutputConstructor] + private StreamSourceConfigSqlServerSourceConfigTransactionLogs() + { + } + } +} diff --git a/sdk/dotnet/Datastream/Stream.cs b/sdk/dotnet/Datastream/Stream.cs index 09a7c80811..c083bd9db0 100644 --- a/sdk/dotnet/Datastream/Stream.cs +++ b/sdk/dotnet/Datastream/Stream.cs @@ -698,6 +698,148 @@ namespace Pulumi.Gcp.Datastream /// }, /// }, /// }, + /// TransactionLogs = null, + /// }, + /// }, + /// DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs + /// { + /// DestinationConnectionProfile = destination.Id, + /// BigqueryDestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigArgs + /// { + /// DataFreshness = "900s", + /// SourceHierarchyDatasets = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs + /// { + /// DatasetTemplate = new Gcp.Datastream.Inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs + /// { + /// Location = "us-central1", + /// }, + /// }, + /// }, + /// }, + /// BackfillNone = null, + /// }); + /// + /// }); + /// ``` + /// ### Datastream Stream Sql Server Change Tables + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var instance = new Gcp.Sql.DatabaseInstance("instance", new() + /// { + /// Name = "sql-server", + /// DatabaseVersion = "SQLSERVER_2019_STANDARD", + /// Region = "us-central1", + /// RootPassword = "root-password", + /// DeletionProtection = true, + /// Settings = new Gcp.Sql.Inputs.DatabaseInstanceSettingsArgs + /// { + /// Tier = "db-custom-2-4096", + /// IpConfiguration = new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationArgs + /// { + /// AuthorizedNetworks = new[] + /// { + /// new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs + /// { + /// Value = "34.71.242.81", + /// }, + /// new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs + /// { + /// Value = "34.72.28.29", + /// }, + /// new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs + /// { + /// Value = "34.67.6.157", + /// }, + /// new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs + /// { + /// Value = "34.67.234.134", + /// }, + /// new Gcp.Sql.Inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs + /// { + /// Value = "34.72.239.218", + /// }, + /// }, + /// }, + /// }, + /// }); + /// + /// var user = new Gcp.Sql.User("user", new() + /// { + /// Name = "user", + /// Instance = instance.Name, + /// Password = "password", + /// }); + /// + /// var db = new Gcp.Sql.Database("db", new() + /// { + /// Name = "db", + /// Instance = instance.Name, + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// user, + /// }, + /// }); + /// + /// var source = new Gcp.Datastream.ConnectionProfile("source", new() + /// { + /// DisplayName = "SQL Server Source", + /// Location = "us-central1", + /// ConnectionProfileId = "source-profile", + /// SqlServerProfile = new Gcp.Datastream.Inputs.ConnectionProfileSqlServerProfileArgs + /// { + /// 
Hostname = instance.PublicIpAddress, + /// Port = 1433, + /// Username = user.Name, + /// Password = user.Password, + /// Database = db.Name, + /// }, + /// }); + /// + /// var destination = new Gcp.Datastream.ConnectionProfile("destination", new() + /// { + /// DisplayName = "BigQuery Destination", + /// Location = "us-central1", + /// ConnectionProfileId = "destination-profile", + /// BigqueryProfile = null, + /// }); + /// + /// var @default = new Gcp.Datastream.Stream("default", new() + /// { + /// DisplayName = "SQL Server to BigQuery", + /// Location = "us-central1", + /// StreamId = "stream", + /// SourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigArgs + /// { + /// SourceConnectionProfile = source.Id, + /// SqlServerSourceConfig = new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigArgs + /// { + /// IncludeObjects = new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs + /// { + /// Schemas = new[] + /// { + /// new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs + /// { + /// Schema = "schema", + /// Tables = new[] + /// { + /// new Gcp.Datastream.Inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs + /// { + /// Table = "table", + /// }, + /// }, + /// }, + /// }, + /// }, + /// ChangeTables = null, /// }, /// }, /// DestinationConfig = new Gcp.Datastream.Inputs.StreamDestinationConfigArgs @@ -1173,7 +1315,8 @@ public partial class Stream : global::Pulumi.CustomResource public Output CustomerManagedEncryptionKey { get; private set; } = null!; /// - /// Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + /// Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + /// values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED /// [Output("desiredState")] public Output DesiredState { get; private set; } = null!; @@ -1322,7 +1465,8 @@ public sealed class StreamArgs : global::Pulumi.ResourceArgs public Input? CustomerManagedEncryptionKey { get; set; } /// - /// Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + /// Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + /// values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED /// [Input("desiredState")] public Input? DesiredState { get; set; } @@ -1409,7 +1553,8 @@ public sealed class StreamState : global::Pulumi.ResourceArgs public Input? CustomerManagedEncryptionKey { get; set; } /// - /// Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + /// Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + /// values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED /// [Input("desiredState")] public Input? DesiredState { get; set; } diff --git a/sdk/dotnet/DiscoveryEngine/DataStore.cs b/sdk/dotnet/DiscoveryEngine/DataStore.cs index 4ae1273c35..6f0f1a15dc 100644 --- a/sdk/dotnet/DiscoveryEngine/DataStore.cs +++ b/sdk/dotnet/DiscoveryEngine/DataStore.cs @@ -173,7 +173,7 @@ public partial class DataStore : global::Pulumi.CustomResource /// /// The industry vertical that the data store registers. - /// Possible values are: `GENERIC`, `MEDIA`. + /// Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. 
/// [Output("industryVertical")] public Output IndustryVertical { get; private set; } = null!; @@ -215,7 +215,7 @@ public partial class DataStore : global::Pulumi.CustomResource /// /// The solutions that the data store enrolls. - /// Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + /// Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. /// [Output("solutionTypes")] public Output> SolutionTypes { get; private set; } = null!; @@ -306,7 +306,7 @@ public sealed class DataStoreArgs : global::Pulumi.ResourceArgs /// /// The industry vertical that the data store registers. - /// Possible values are: `GENERIC`, `MEDIA`. + /// Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. /// [Input("industryVertical", required: true)] public Input IndustryVertical { get; set; } = null!; @@ -342,7 +342,7 @@ public sealed class DataStoreArgs : global::Pulumi.ResourceArgs /// /// The solutions that the data store enrolls. - /// Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + /// Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. /// public InputList SolutionTypes { @@ -410,7 +410,7 @@ public sealed class DataStoreState : global::Pulumi.ResourceArgs /// /// The industry vertical that the data store registers. - /// Possible values are: `GENERIC`, `MEDIA`. + /// Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. /// [Input("industryVertical")] public Input? IndustryVertical { get; set; } @@ -455,7 +455,7 @@ public sealed class DataStoreState : global::Pulumi.ResourceArgs /// /// The solutions that the data store enrolls. - /// Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + /// Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. /// public InputList SolutionTypes { diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigArgs.cs index 47013b3e47..22b48b586f 100644 --- a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigArgs.cs +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigArgs.cs @@ -12,6 +12,13 @@ namespace Pulumi.Gcp.DiscoveryEngine.Inputs public sealed class DataStoreDocumentProcessingConfigArgs : global::Pulumi.ResourceArgs { + /// + /// Whether chunking mode is enabled. + /// Structure is documented below. + /// + [Input("chunkingConfig")] + public Input? ChunkingConfig { get; set; } + /// /// Configurations for default Document parser. If not specified, this resource /// will be configured to use a default DigitalParsingConfig, and the default parsing diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigArgs.cs new file mode 100644 index 0000000000..304101858b --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Inputs +{ + + public sealed class DataStoreDocumentProcessingConfigChunkingConfigArgs : global::Pulumi.ResourceArgs + { + /// + /// Configuration for the layout based chunking. + /// Structure is documented below. + /// + [Input("layoutBasedChunkingConfig")] + public Input? LayoutBasedChunkingConfig { get; set; } + + public DataStoreDocumentProcessingConfigChunkingConfigArgs() + { + } + public static new DataStoreDocumentProcessingConfigChunkingConfigArgs Empty => new DataStoreDocumentProcessingConfigChunkingConfigArgs(); + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigGetArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigGetArgs.cs new file mode 100644 index 0000000000..c8ba35ac12 --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigGetArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Inputs +{ + + public sealed class DataStoreDocumentProcessingConfigChunkingConfigGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Configuration for the layout based chunking. + /// Structure is documented below. + /// + [Input("layoutBasedChunkingConfig")] + public Input? LayoutBasedChunkingConfig { get; set; } + + public DataStoreDocumentProcessingConfigChunkingConfigGetArgs() + { + } + public static new DataStoreDocumentProcessingConfigChunkingConfigGetArgs Empty => new DataStoreDocumentProcessingConfigChunkingConfigGetArgs(); + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs.cs new file mode 100644 index 0000000000..bb04d5b222 --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Inputs +{ + + public sealed class DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs : global::Pulumi.ResourceArgs + { + /// + /// The token size limit for each chunk. + /// Supported values: 100-500 (inclusive). Default value: 500. + /// + [Input("chunkSize")] + public Input? ChunkSize { get; set; } + + /// + /// Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + /// Default value: False. + /// + [Input("includeAncestorHeadings")] + public Input? 
IncludeAncestorHeadings { get; set; } + + public DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs() + { + } + public static new DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs Empty => new DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs(); + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigGetArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigGetArgs.cs new file mode 100644 index 0000000000..da0163a8dc --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigGetArgs.cs @@ -0,0 +1,34 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Inputs +{ + + public sealed class DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigGetArgs : global::Pulumi.ResourceArgs + { + /// + /// The token size limit for each chunk. + /// Supported values: 100-500 (inclusive). Default value: 500. + /// + [Input("chunkSize")] + public Input? ChunkSize { get; set; } + + /// + /// Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + /// Default value: False. + /// + [Input("includeAncestorHeadings")] + public Input? IncludeAncestorHeadings { get; set; } + + public DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigGetArgs() + { + } + public static new DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigGetArgs Empty => new DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigGetArgs(); + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.cs index 2ad6da64cf..9749980029 100644 --- a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.cs +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.cs @@ -18,6 +18,12 @@ public sealed class DataStoreDocumentProcessingConfigDefaultParsingConfigArgs : [Input("digitalParsingConfig")] public Input? DigitalParsingConfig { get; set; } + /// + /// Configurations applied to layout parser. + /// + [Input("layoutParsingConfig")] + public Input? LayoutParsingConfig { get; set; } + /// /// Configurations applied to OCR parser. Currently it only applies to PDFs. /// Structure is documented below. diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigGetArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigGetArgs.cs index 67355e19cd..e90ba10101 100644 --- a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigGetArgs.cs +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigGetArgs.cs @@ -18,6 +18,12 @@ public sealed class DataStoreDocumentProcessingConfigDefaultParsingConfigGetArgs [Input("digitalParsingConfig")] public Input? 
DigitalParsingConfig { get; set; } + /// + /// Configurations applied to layout parser. + /// + [Input("layoutParsingConfig")] + public Input? LayoutParsingConfig { get; set; } + /// /// Configurations applied to OCR parser. Currently it only applies to PDFs. /// Structure is documented below. diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.cs new file mode 100644 index 0000000000..515a7bc997 --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Inputs +{ + + public sealed class DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs : global::Pulumi.ResourceArgs + { + public DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs() + { + } + public static new DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs Empty => new DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs(); + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigGetArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigGetArgs.cs new file mode 100644 index 0000000000..072cf5d058 --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigGetArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Inputs +{ + + public sealed class DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigGetArgs : global::Pulumi.ResourceArgs + { + public DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigGetArgs() + { + } + public static new DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigGetArgs Empty => new DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigGetArgs(); + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigGetArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigGetArgs.cs index 4d46f14c43..dc0cd55abc 100644 --- a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigGetArgs.cs +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigGetArgs.cs @@ -12,6 +12,13 @@ namespace Pulumi.Gcp.DiscoveryEngine.Inputs public sealed class DataStoreDocumentProcessingConfigGetArgs : global::Pulumi.ResourceArgs { + /// + /// Whether chunking mode is enabled. + /// Structure is documented below. + /// + [Input("chunkingConfig")] + public Input? ChunkingConfig { get; set; } + /// /// Configurations for default Document parser. 
If not specified, this resource /// will be configured to use a default DigitalParsingConfig, and the default parsing diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.cs index d49050b3e1..123bb38c84 100644 --- a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.cs +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.cs @@ -24,6 +24,12 @@ public sealed class DataStoreDocumentProcessingConfigParsingConfigOverrideArgs : [Input("fileType", required: true)] public Input FileType { get; set; } = null!; + /// + /// Configurations applied to layout parser. + /// + [Input("layoutParsingConfig")] + public Input? LayoutParsingConfig { get; set; } + /// /// Configurations applied to OCR parser. Currently it only applies to PDFs. /// Structure is documented below. diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideGetArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideGetArgs.cs index b00496ef0e..42a9803b48 100644 --- a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideGetArgs.cs +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideGetArgs.cs @@ -24,6 +24,12 @@ public sealed class DataStoreDocumentProcessingConfigParsingConfigOverrideGetArg [Input("fileType", required: true)] public Input FileType { get; set; } = null!; + /// + /// Configurations applied to layout parser. + /// + [Input("layoutParsingConfig")] + public Input? LayoutParsingConfig { get; set; } + /// /// Configurations applied to OCR parser. Currently it only applies to PDFs. /// Structure is documented below. diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.cs new file mode 100644 index 0000000000..ef15c5cf58 --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Inputs +{ + + public sealed class DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs : global::Pulumi.ResourceArgs + { + public DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs() + { + } + public static new DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs Empty => new DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs(); + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigGetArgs.cs b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigGetArgs.cs new file mode 100644 index 0000000000..15a0691b7e --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigGetArgs.cs @@ -0,0 +1,20 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Inputs +{ + + public sealed class DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigGetArgs : global::Pulumi.ResourceArgs + { + public DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigGetArgs() + { + } + public static new DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigGetArgs Empty => new DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigGetArgs(); + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfig.cs b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfig.cs index d171f96757..8dd1892ca2 100644 --- a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfig.cs +++ b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfig.cs @@ -13,6 +13,11 @@ namespace Pulumi.Gcp.DiscoveryEngine.Outputs [OutputType] public sealed class DataStoreDocumentProcessingConfig { + /// + /// Whether chunking mode is enabled. + /// Structure is documented below. + /// + public readonly Outputs.DataStoreDocumentProcessingConfigChunkingConfig? ChunkingConfig; /// /// Configurations for default Document parser. If not specified, this resource /// will be configured to use a default DigitalParsingConfig, and the default parsing @@ -33,12 +38,15 @@ public sealed class DataStoreDocumentProcessingConfig [OutputConstructor] private DataStoreDocumentProcessingConfig( + Outputs.DataStoreDocumentProcessingConfigChunkingConfig? chunkingConfig, + Outputs.DataStoreDocumentProcessingConfigDefaultParsingConfig? defaultParsingConfig, string? 
name, ImmutableArray parsingConfigOverrides) { + ChunkingConfig = chunkingConfig; DefaultParsingConfig = defaultParsingConfig; Name = name; ParsingConfigOverrides = parsingConfigOverrides; diff --git a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigChunkingConfig.cs b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigChunkingConfig.cs new file mode 100644 index 0000000000..eae63d335d --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigChunkingConfig.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Outputs +{ + + [OutputType] + public sealed class DataStoreDocumentProcessingConfigChunkingConfig + { + /// + /// Configuration for the layout based chunking. + /// Structure is documented below. + /// + public readonly Outputs.DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig? LayoutBasedChunkingConfig; + + [OutputConstructor] + private DataStoreDocumentProcessingConfigChunkingConfig(Outputs.DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig? layoutBasedChunkingConfig) + { + LayoutBasedChunkingConfig = layoutBasedChunkingConfig; + } + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.cs b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.cs new file mode 100644 index 0000000000..bb7859c780 --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.cs @@ -0,0 +1,37 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Outputs +{ + + [OutputType] + public sealed class DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig + { + /// + /// The token size limit for each chunk. + /// Supported values: 100-500 (inclusive). Default value: 500. + /// + public readonly int? ChunkSize; + /// + /// Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + /// Default value: False. + /// + public readonly bool? IncludeAncestorHeadings; + + [OutputConstructor] + private DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig( + int? chunkSize, + + bool? 
includeAncestorHeadings) + { + ChunkSize = chunkSize; + IncludeAncestorHeadings = includeAncestorHeadings; + } + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigDefaultParsingConfig.cs b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigDefaultParsingConfig.cs index 516d6fbb2a..8bcca7898a 100644 --- a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigDefaultParsingConfig.cs +++ b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigDefaultParsingConfig.cs @@ -18,6 +18,10 @@ public sealed class DataStoreDocumentProcessingConfigDefaultParsingConfig /// public readonly Outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig? DigitalParsingConfig; /// + /// Configurations applied to layout parser. + /// + public readonly Outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig? LayoutParsingConfig; + /// /// Configurations applied to OCR parser. Currently it only applies to PDFs. /// Structure is documented below. /// @@ -27,9 +31,12 @@ public sealed class DataStoreDocumentProcessingConfigDefaultParsingConfig private DataStoreDocumentProcessingConfigDefaultParsingConfig( Outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig? digitalParsingConfig, + Outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig? layoutParsingConfig, + Outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig? ocrParsingConfig) { DigitalParsingConfig = digitalParsingConfig; + LayoutParsingConfig = layoutParsingConfig; OcrParsingConfig = ocrParsingConfig; } } diff --git a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig.cs b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig.cs new file mode 100644 index 0000000000..07f2f19dd6 --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig.cs @@ -0,0 +1,21 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Outputs +{ + + [OutputType] + public sealed class DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig + { + [OutputConstructor] + private DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig() + { + } + } +} diff --git a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigParsingConfigOverride.cs b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigParsingConfigOverride.cs index 09802343c3..ace32d77f8 100644 --- a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigParsingConfigOverride.cs +++ b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigParsingConfigOverride.cs @@ -22,6 +22,10 @@ public sealed class DataStoreDocumentProcessingConfigParsingConfigOverride /// public readonly string FileType; /// + /// Configurations applied to layout parser. + /// + public readonly Outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig? LayoutParsingConfig; + /// /// Configurations applied to OCR parser. Currently it only applies to PDFs. 
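For orientation, a minimal sketch of how the layout-based chunking and layout parser options introduced above could be set on a data store through the .NET SDK. The surrounding DataStore arguments (location, data store id, display name, content config) are not part of this patch and are assumed; setting `LayoutParsingConfig` to `null` follows the convention the generated examples use for an empty message block.

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    // Hypothetical data store; only DocumentProcessingConfig exercises the fields added in this patch.
    var documents = new Gcp.DiscoveryEngine.DataStore("documents", new()
    {
        Location = "global",
        DataStoreId = "documents-data-store",
        DisplayName = "documents",
        IndustryVertical = "GENERIC",
        ContentConfig = "CONTENT_REQUIRED",
        SolutionTypes = new[] { "SOLUTION_TYPE_SEARCH" },
        DocumentProcessingConfig = new Gcp.DiscoveryEngine.Inputs.DataStoreDocumentProcessingConfigArgs
        {
            // New in this release: layout-based chunking (chunk size must be 100-500 tokens).
            ChunkingConfig = new Gcp.DiscoveryEngine.Inputs.DataStoreDocumentProcessingConfigChunkingConfigArgs
            {
                LayoutBasedChunkingConfig = new Gcp.DiscoveryEngine.Inputs.DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs
                {
                    ChunkSize = 500,
                    IncludeAncestorHeadings = true,
                },
            },
            // New in this release: the layout parser can be selected as the default parser.
            DefaultParsingConfig = new Gcp.DiscoveryEngine.Inputs.DataStoreDocumentProcessingConfigDefaultParsingConfigArgs
            {
                LayoutParsingConfig = null,
            },
        },
    });
});
```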
/// Structure is documented below. /// @@ -33,10 +37,13 @@ private DataStoreDocumentProcessingConfigParsingConfigOverride( string fileType, + Outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig? layoutParsingConfig, + Outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig? ocrParsingConfig) { DigitalParsingConfig = digitalParsingConfig; FileType = fileType; + LayoutParsingConfig = layoutParsingConfig; OcrParsingConfig = ocrParsingConfig; } } diff --git a/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig.cs b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig.cs new file mode 100644 index 0000000000..b271204e65 --- /dev/null +++ b/sdk/dotnet/DiscoveryEngine/Outputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig.cs @@ -0,0 +1,21 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.DiscoveryEngine.Outputs +{ + + [OutputType] + public sealed class DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig + { + [OutputConstructor] + private DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig() + { + } + } +} diff --git a/sdk/dotnet/Firebase/DatabaseInstance.cs b/sdk/dotnet/Firebase/DatabaseInstance.cs index 99c784b0be..41926a74b1 100644 --- a/sdk/dotnet/Firebase/DatabaseInstance.cs +++ b/sdk/dotnet/Firebase/DatabaseInstance.cs @@ -143,7 +143,7 @@ public partial class DatabaseInstance : global::Pulumi.CustomResource public Output DatabaseUrl { get; private set; } = null!; /// - /// The intended database state. + /// The intended database state. Possible values: ACTIVE, DISABLED. /// [Output("desiredState")] public Output DesiredState { get; private set; } = null!; @@ -245,7 +245,7 @@ public static DatabaseInstance Get(string name, Input id, DatabaseInstan public sealed class DatabaseInstanceArgs : global::Pulumi.ResourceArgs { /// - /// The intended database state. + /// The intended database state. Possible values: ACTIVE, DISABLED. /// [Input("desiredState")] public Input? DesiredState { get; set; } @@ -301,7 +301,7 @@ public sealed class DatabaseInstanceState : global::Pulumi.ResourceArgs public Input? DatabaseUrl { get; set; } /// - /// The intended database state. + /// The intended database state. Possible values: ACTIVE, DISABLED. /// [Input("desiredState")] public Input? 
DesiredState { get; set; } diff --git a/sdk/dotnet/GkeHub/FeatureMembership.cs b/sdk/dotnet/GkeHub/FeatureMembership.cs index 70dff1c6ce..aff4bd5a85 100644 --- a/sdk/dotnet/GkeHub/FeatureMembership.cs +++ b/sdk/dotnet/GkeHub/FeatureMembership.cs @@ -60,9 +60,10 @@ namespace Pulumi.Gcp.GkeHub /// Membership = membership.MembershipId, /// Configmanagement = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementArgs /// { - /// Version = "1.6.2", + /// Version = "1.19.0", /// ConfigSync = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncArgs /// { + /// Enabled = true, /// Git = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs /// { /// SyncRepo = "https://github.com/hashicorp/terraform", @@ -119,9 +120,10 @@ namespace Pulumi.Gcp.GkeHub /// Membership = membership.MembershipId, /// Configmanagement = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementArgs /// { - /// Version = "1.15.1", + /// Version = "1.19.0", /// ConfigSync = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncArgs /// { + /// Enabled = true, /// Oci = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncOciArgs /// { /// SyncRepo = "us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest", @@ -257,9 +259,10 @@ namespace Pulumi.Gcp.GkeHub /// MembershipLocation = membership.Location, /// Configmanagement = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementArgs /// { - /// Version = "1.6.2", + /// Version = "1.19.0", /// ConfigSync = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncArgs /// { + /// Enabled = true, /// Git = new Gcp.GkeHub.Inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs /// { /// SyncRepo = "https://github.com/hashicorp/terraform", diff --git a/sdk/dotnet/GkeHub/Inputs/FeatureMembershipConfigmanagementArgs.cs b/sdk/dotnet/GkeHub/Inputs/FeatureMembershipConfigmanagementArgs.cs index 7651ebe1c3..2a494c0a90 100644 --- a/sdk/dotnet/GkeHub/Inputs/FeatureMembershipConfigmanagementArgs.cs +++ b/sdk/dotnet/GkeHub/Inputs/FeatureMembershipConfigmanagementArgs.cs @@ -13,7 +13,9 @@ namespace Pulumi.Gcp.GkeHub.Inputs public sealed class FeatureMembershipConfigmanagementArgs : global::Pulumi.ResourceArgs { /// + /// (Optional, Deprecated) /// Binauthz configuration for the cluster. Structure is documented below. + /// This field will be ignored and should not be set. /// [Input("binauthz")] public Input? Binauthz { get; set; } @@ -26,6 +28,10 @@ public sealed class FeatureMembershipConfigmanagementArgs : global::Pulumi.Resou /// /// Hierarchy Controller configuration for the cluster. Structure is documented below. + /// Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + /// Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + /// Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + /// to migrate from Hierarchy Controller to HNC. /// [Input("hierarchyController")] public Input? HierarchyController { get; set; } @@ -38,6 +44,8 @@ public sealed class FeatureMembershipConfigmanagementArgs : global::Pulumi.Resou /// /// Policy Controller configuration for the cluster. Structure is documented below. + /// Configuring Policy Controller through the configmanagement feature is no longer recommended. + /// Use the policycontroller feature instead. /// [Input("policyController")] public Input? 
PolicyController { get; set; } diff --git a/sdk/dotnet/GkeHub/Inputs/FeatureMembershipConfigmanagementGetArgs.cs b/sdk/dotnet/GkeHub/Inputs/FeatureMembershipConfigmanagementGetArgs.cs index 42723458f6..62052d4908 100644 --- a/sdk/dotnet/GkeHub/Inputs/FeatureMembershipConfigmanagementGetArgs.cs +++ b/sdk/dotnet/GkeHub/Inputs/FeatureMembershipConfigmanagementGetArgs.cs @@ -13,7 +13,9 @@ namespace Pulumi.Gcp.GkeHub.Inputs public sealed class FeatureMembershipConfigmanagementGetArgs : global::Pulumi.ResourceArgs { /// + /// (Optional, Deprecated) /// Binauthz configuration for the cluster. Structure is documented below. + /// This field will be ignored and should not be set. /// [Input("binauthz")] public Input? Binauthz { get; set; } @@ -26,6 +28,10 @@ public sealed class FeatureMembershipConfigmanagementGetArgs : global::Pulumi.Re /// /// Hierarchy Controller configuration for the cluster. Structure is documented below. + /// Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + /// Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + /// Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + /// to migrate from Hierarchy Controller to HNC. /// [Input("hierarchyController")] public Input? HierarchyController { get; set; } @@ -38,6 +44,8 @@ public sealed class FeatureMembershipConfigmanagementGetArgs : global::Pulumi.Re /// /// Policy Controller configuration for the cluster. Structure is documented below. + /// Configuring Policy Controller through the configmanagement feature is no longer recommended. + /// Use the policycontroller feature instead. /// [Input("policyController")] public Input? PolicyController { get; set; } diff --git a/sdk/dotnet/GkeHub/Outputs/FeatureMembershipConfigmanagement.cs b/sdk/dotnet/GkeHub/Outputs/FeatureMembershipConfigmanagement.cs index c5024ba616..8dc8ac7d3b 100644 --- a/sdk/dotnet/GkeHub/Outputs/FeatureMembershipConfigmanagement.cs +++ b/sdk/dotnet/GkeHub/Outputs/FeatureMembershipConfigmanagement.cs @@ -14,7 +14,9 @@ namespace Pulumi.Gcp.GkeHub.Outputs public sealed class FeatureMembershipConfigmanagement { /// + /// (Optional, Deprecated) /// Binauthz configuration for the cluster. Structure is documented below. + /// This field will be ignored and should not be set. /// public readonly Outputs.FeatureMembershipConfigmanagementBinauthz? Binauthz; /// @@ -23,6 +25,10 @@ public sealed class FeatureMembershipConfigmanagement public readonly Outputs.FeatureMembershipConfigmanagementConfigSync? ConfigSync; /// /// Hierarchy Controller configuration for the cluster. Structure is documented below. + /// Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + /// Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + /// Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + /// to migrate from Hierarchy Controller to HNC. /// public readonly Outputs.FeatureMembershipConfigmanagementHierarchyController? HierarchyController; /// @@ -31,6 +37,8 @@ public sealed class FeatureMembershipConfigmanagement public readonly string? Management; /// /// Policy Controller configuration for the cluster. Structure is documented below. 
+ /// Configuring Policy Controller through the configmanagement feature is no longer recommended. + /// Use the policycontroller feature instead. /// public readonly Outputs.FeatureMembershipConfigmanagementPolicyController? PolicyController; /// diff --git a/sdk/dotnet/Iam/GetWorkloadIdentityPoolProvider.cs b/sdk/dotnet/Iam/GetWorkloadIdentityPoolProvider.cs index 80a1afb497..990d8e3900 100644 --- a/sdk/dotnet/Iam/GetWorkloadIdentityPoolProvider.cs +++ b/sdk/dotnet/Iam/GetWorkloadIdentityPoolProvider.cs @@ -146,6 +146,7 @@ public sealed class GetWorkloadIdentityPoolProviderResult public readonly string State; public readonly string WorkloadIdentityPoolId; public readonly string WorkloadIdentityPoolProviderId; + public readonly ImmutableArray X509s; [OutputConstructor] private GetWorkloadIdentityPoolProviderResult( @@ -175,7 +176,9 @@ private GetWorkloadIdentityPoolProviderResult( string workloadIdentityPoolId, - string workloadIdentityPoolProviderId) + string workloadIdentityPoolProviderId, + + ImmutableArray x509s) { AttributeCondition = attributeCondition; AttributeMapping = attributeMapping; @@ -191,6 +194,7 @@ private GetWorkloadIdentityPoolProviderResult( State = state; WorkloadIdentityPoolId = workloadIdentityPoolId; WorkloadIdentityPoolProviderId = workloadIdentityPoolProviderId; + X509s = x509s; } } } diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderSamlArgs.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderSamlArgs.cs index 2669a7894c..c30dc84bd1 100644 --- a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderSamlArgs.cs +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderSamlArgs.cs @@ -14,6 +14,8 @@ public sealed class WorkloadIdentityPoolProviderSamlArgs : global::Pulumi.Resour { /// /// SAML Identity provider configuration metadata xml doc. + /// + /// <a name="nested_x509"></a>The `x509` block supports: /// [Input("idpMetadataXml", required: true)] public Input IdpMetadataXml { get; set; } = null!; diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderSamlGetArgs.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderSamlGetArgs.cs index 9f34ef6d8b..767c10e312 100644 --- a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderSamlGetArgs.cs +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderSamlGetArgs.cs @@ -14,6 +14,8 @@ public sealed class WorkloadIdentityPoolProviderSamlGetArgs : global::Pulumi.Res { /// /// SAML Identity provider configuration metadata xml doc. + /// + /// <a name="nested_x509"></a>The `x509` block supports: /// [Input("idpMetadataXml", required: true)] public Input IdpMetadataXml { get; set; } = null!; diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509Args.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509Args.cs new file mode 100644 index 0000000000..acdf6868db --- /dev/null +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509Args.cs @@ -0,0 +1,30 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Inputs +{ + + public sealed class WorkloadIdentityPoolProviderX509Args : global::Pulumi.ResourceArgs + { + /// + /// A Trust store, use this trust store as a wrapper to config the trust + /// anchor and optional intermediate cas to help build the trust chain for + /// the incoming end entity certificate. Follow the x509 guidelines to + /// define those PEM encoded certs. Only 1 trust store is currently + /// supported. + /// + [Input("trustStore", required: true)] + public Input TrustStore { get; set; } = null!; + + public WorkloadIdentityPoolProviderX509Args() + { + } + public static new WorkloadIdentityPoolProviderX509Args Empty => new WorkloadIdentityPoolProviderX509Args(); + } +} diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509GetArgs.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509GetArgs.cs new file mode 100644 index 0000000000..0eb30fdea4 --- /dev/null +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509GetArgs.cs @@ -0,0 +1,30 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Inputs +{ + + public sealed class WorkloadIdentityPoolProviderX509GetArgs : global::Pulumi.ResourceArgs + { + /// + /// A Trust store, use this trust store as a wrapper to config the trust + /// anchor and optional intermediate cas to help build the trust chain for + /// the incoming end entity certificate. Follow the x509 guidelines to + /// define those PEM encoded certs. Only 1 trust store is currently + /// supported. + /// + [Input("trustStore", required: true)] + public Input TrustStore { get; set; } = null!; + + public WorkloadIdentityPoolProviderX509GetArgs() + { + } + public static new WorkloadIdentityPoolProviderX509GetArgs Empty => new WorkloadIdentityPoolProviderX509GetArgs(); + } +} diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreArgs.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreArgs.cs new file mode 100644 index 0000000000..e5be6e282b --- /dev/null +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreArgs.cs @@ -0,0 +1,50 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Inputs +{ + + public sealed class WorkloadIdentityPoolProviderX509TrustStoreArgs : global::Pulumi.ResourceArgs + { + [Input("intermediateCas")] + private InputList? _intermediateCas; + + /// + /// Set of intermediate CA certificates used for building the trust chain to + /// trust anchor. + /// IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + /// Structure is documented below. + /// + public InputList IntermediateCas + { + get => _intermediateCas ?? (_intermediateCas = new InputList()); + set => _intermediateCas = value; + } + + [Input("trustAnchors", required: true)] + private InputList? 
_trustAnchors; + + /// + /// List of Trust Anchors to be used while performing validation + /// against a given TrustStore. The incoming end entity's certificate + /// must be chained up to one of the trust anchors here. + /// Structure is documented below. + /// + public InputList TrustAnchors + { + get => _trustAnchors ?? (_trustAnchors = new InputList()); + set => _trustAnchors = value; + } + + public WorkloadIdentityPoolProviderX509TrustStoreArgs() + { + } + public static new WorkloadIdentityPoolProviderX509TrustStoreArgs Empty => new WorkloadIdentityPoolProviderX509TrustStoreArgs(); + } +} diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreGetArgs.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreGetArgs.cs new file mode 100644 index 0000000000..f5b2f12783 --- /dev/null +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreGetArgs.cs @@ -0,0 +1,50 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Inputs +{ + + public sealed class WorkloadIdentityPoolProviderX509TrustStoreGetArgs : global::Pulumi.ResourceArgs + { + [Input("intermediateCas")] + private InputList? _intermediateCas; + + /// + /// Set of intermediate CA certificates used for building the trust chain to + /// trust anchor. + /// IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + /// Structure is documented below. + /// + public InputList IntermediateCas + { + get => _intermediateCas ?? (_intermediateCas = new InputList()); + set => _intermediateCas = value; + } + + [Input("trustAnchors", required: true)] + private InputList? _trustAnchors; + + /// + /// List of Trust Anchors to be used while performing validation + /// against a given TrustStore. The incoming end entity's certificate + /// must be chained up to one of the trust anchors here. + /// Structure is documented below. + /// + public InputList TrustAnchors + { + get => _trustAnchors ?? (_trustAnchors = new InputList()); + set => _trustAnchors = value; + } + + public WorkloadIdentityPoolProviderX509TrustStoreGetArgs() + { + } + public static new WorkloadIdentityPoolProviderX509TrustStoreGetArgs Empty => new WorkloadIdentityPoolProviderX509TrustStoreGetArgs(); + } +} diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.cs new file mode 100644 index 0000000000..49931c1316 --- /dev/null +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Inputs +{ + + public sealed class WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs : global::Pulumi.ResourceArgs + { + /// + /// PEM certificate of the PKI used for validation. Must only contain one + /// ca certificate(either root or intermediate cert). + /// + [Input("pemCertificate")] + public Input? 
PemCertificate { get; set; } + + public WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs() + { + } + public static new WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs Empty => new WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs(); + } +} diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaGetArgs.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaGetArgs.cs new file mode 100644 index 0000000000..4264b5bf48 --- /dev/null +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaGetArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Inputs +{ + + public sealed class WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaGetArgs : global::Pulumi.ResourceArgs + { + /// + /// PEM certificate of the PKI used for validation. Must only contain one + /// ca certificate(either root or intermediate cert). + /// + [Input("pemCertificate")] + public Input? PemCertificate { get; set; } + + public WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaGetArgs() + { + } + public static new WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaGetArgs Empty => new WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaGetArgs(); + } +} diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.cs new file mode 100644 index 0000000000..e969bf2a00 --- /dev/null +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Inputs +{ + + public sealed class WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs : global::Pulumi.ResourceArgs + { + /// + /// PEM certificate of the PKI used for validation. Must only contain one + /// ca certificate(either root or intermediate cert). + /// + [Input("pemCertificate")] + public Input? PemCertificate { get; set; } + + public WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs() + { + } + public static new WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs Empty => new WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs(); + } +} diff --git a/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorGetArgs.cs b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorGetArgs.cs new file mode 100644 index 0000000000..b8598f5c69 --- /dev/null +++ b/sdk/dotnet/Iam/Inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorGetArgs.cs @@ -0,0 +1,27 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Inputs +{ + + public sealed class WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorGetArgs : global::Pulumi.ResourceArgs + { + /// + /// PEM certificate of the PKI used for validation. Must only contain one + /// ca certificate(either root or intermediate cert). + /// + [Input("pemCertificate")] + public Input? PemCertificate { get; set; } + + public WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorGetArgs() + { + } + public static new WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorGetArgs Empty => new WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorGetArgs(); + } +} diff --git a/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509Result.cs b/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509Result.cs new file mode 100644 index 0000000000..719093ed41 --- /dev/null +++ b/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509Result.cs @@ -0,0 +1,31 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Outputs +{ + + [OutputType] + public sealed class GetWorkloadIdentityPoolProviderX509Result + { + /// + /// A Trust store, use this trust store as a wrapper to config the trust + /// anchor and optional intermediate cas to help build the trust chain for + /// the incoming end entity certificate. Follow the x509 guidelines to + /// define those PEM encoded certs. Only 1 trust store is currently + /// supported. + /// + public readonly ImmutableArray TrustStores; + + [OutputConstructor] + private GetWorkloadIdentityPoolProviderX509Result(ImmutableArray trustStores) + { + TrustStores = trustStores; + } + } +} diff --git a/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult.cs b/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult.cs new file mode 100644 index 0000000000..907cec358d --- /dev/null +++ b/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Outputs +{ + + [OutputType] + public sealed class GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult + { + /// + /// PEM certificate of the PKI used for validation. Must only contain one + /// ca certificate(either root or intermediate cert). 
+ /// + public readonly string PemCertificate; + + [OutputConstructor] + private GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult(string pemCertificate) + { + PemCertificate = pemCertificate; + } + } +} diff --git a/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreResult.cs b/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreResult.cs new file mode 100644 index 0000000000..6584c0bb6c --- /dev/null +++ b/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreResult.cs @@ -0,0 +1,39 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Outputs +{ + + [OutputType] + public sealed class GetWorkloadIdentityPoolProviderX509TrustStoreResult + { + /// + /// Set of intermediate CA certificates used for building the trust chain to + /// trust anchor. + /// IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + /// + public readonly ImmutableArray IntermediateCas; + /// + /// List of Trust Anchors to be used while performing validation + /// against a given TrustStore. The incoming end entity's certificate + /// must be chained up to one of the trust anchors here. + /// + public readonly ImmutableArray TrustAnchors; + + [OutputConstructor] + private GetWorkloadIdentityPoolProviderX509TrustStoreResult( + ImmutableArray intermediateCas, + + ImmutableArray trustAnchors) + { + IntermediateCas = intermediateCas; + TrustAnchors = trustAnchors; + } + } +} diff --git a/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult.cs b/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult.cs new file mode 100644 index 0000000000..b30f483bf7 --- /dev/null +++ b/sdk/dotnet/Iam/Outputs/GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Outputs +{ + + [OutputType] + public sealed class GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult + { + /// + /// PEM certificate of the PKI used for validation. Must only contain one + /// ca certificate(either root or intermediate cert). + /// + public readonly string PemCertificate; + + [OutputConstructor] + private GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult(string pemCertificate) + { + PemCertificate = pemCertificate; + } + } +} diff --git a/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderSaml.cs b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderSaml.cs index c437103a77..b8849d64e6 100644 --- a/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderSaml.cs +++ b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderSaml.cs @@ -15,6 +15,8 @@ public sealed class WorkloadIdentityPoolProviderSaml { /// /// SAML Identity provider configuration metadata xml doc. 
+ /// + /// <a name="nested_x509"></a>The `x509` block supports: /// public readonly string IdpMetadataXml; diff --git a/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509.cs b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509.cs new file mode 100644 index 0000000000..39ae861d20 --- /dev/null +++ b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509.cs @@ -0,0 +1,31 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Outputs +{ + + [OutputType] + public sealed class WorkloadIdentityPoolProviderX509 + { + /// + /// A Trust store, use this trust store as a wrapper to config the trust + /// anchor and optional intermediate cas to help build the trust chain for + /// the incoming end entity certificate. Follow the x509 guidelines to + /// define those PEM encoded certs. Only 1 trust store is currently + /// supported. + /// + public readonly Outputs.WorkloadIdentityPoolProviderX509TrustStore TrustStore; + + [OutputConstructor] + private WorkloadIdentityPoolProviderX509(Outputs.WorkloadIdentityPoolProviderX509TrustStore trustStore) + { + TrustStore = trustStore; + } + } +} diff --git a/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStore.cs b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStore.cs new file mode 100644 index 0000000000..912faaff75 --- /dev/null +++ b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStore.cs @@ -0,0 +1,41 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Outputs +{ + + [OutputType] + public sealed class WorkloadIdentityPoolProviderX509TrustStore + { + /// + /// Set of intermediate CA certificates used for building the trust chain to + /// trust anchor. + /// IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + /// Structure is documented below. + /// + public readonly ImmutableArray IntermediateCas; + /// + /// List of Trust Anchors to be used while performing validation + /// against a given TrustStore. The incoming end entity's certificate + /// must be chained up to one of the trust anchors here. + /// Structure is documented below. + /// + public readonly ImmutableArray TrustAnchors; + + [OutputConstructor] + private WorkloadIdentityPoolProviderX509TrustStore( + ImmutableArray intermediateCas, + + ImmutableArray trustAnchors) + { + IntermediateCas = intermediateCas; + TrustAnchors = trustAnchors; + } + } +} diff --git a/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.cs b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.cs new file mode 100644 index 0000000000..b2d4e76abe --- /dev/null +++ b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Outputs +{ + + [OutputType] + public sealed class WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa + { + /// + /// PEM certificate of the PKI used for validation. Must only contain one + /// ca certificate(either root or intermediate cert). + /// + public readonly string? PemCertificate; + + [OutputConstructor] + private WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa(string? pemCertificate) + { + PemCertificate = pemCertificate; + } + } +} diff --git a/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.cs b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.cs new file mode 100644 index 0000000000..1938fe2203 --- /dev/null +++ b/sdk/dotnet/Iam/Outputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.cs @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Iam.Outputs +{ + + [OutputType] + public sealed class WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor + { + /// + /// PEM certificate of the PKI used for validation. Must only contain one + /// ca certificate(either root or intermediate cert). + /// + public readonly string? PemCertificate; + + [OutputConstructor] + private WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor(string? pemCertificate) + { + PemCertificate = pemCertificate; + } + } +} diff --git a/sdk/dotnet/Iam/WorkloadIdentityPoolProvider.cs b/sdk/dotnet/Iam/WorkloadIdentityPoolProvider.cs index d7c8e13033..3ebbca1119 100644 --- a/sdk/dotnet/Iam/WorkloadIdentityPoolProvider.cs +++ b/sdk/dotnet/Iam/WorkloadIdentityPoolProvider.cs @@ -285,6 +285,107 @@ namespace Pulumi.Gcp.Iam /// /// }); /// ``` + /// ### Iam Workload Identity Pool Provider X509 Basic + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// using Std = Pulumi.Std; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var pool = new Gcp.Iam.WorkloadIdentityPool("pool", new() + /// { + /// WorkloadIdentityPoolId = "example-pool", + /// }); + /// + /// var example = new Gcp.Iam.WorkloadIdentityPoolProvider("example", new() + /// { + /// WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId, + /// WorkloadIdentityPoolProviderId = "example-prvdr", + /// AttributeMapping = + /// { + /// { "google.subject", "assertion.subject.dn.cn" }, + /// }, + /// X509 = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509Args + /// { + /// TrustStore = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreArgs + /// { + /// TrustAnchors = new[] + /// { + /// new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs + /// { + /// PemCertificate = Std.File.Invoke(new() + /// { + /// Input = "test-fixtures/trust_anchor.pem", + /// }).Apply(invoke => invoke.Result), + /// }, + /// }, + /// }, + /// }, + /// }); + /// + /// }); + /// ``` + /// ### Iam Workload Identity Pool Provider X509 Full + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// using Std = 
Pulumi.Std; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var pool = new Gcp.Iam.WorkloadIdentityPool("pool", new() + /// { + /// WorkloadIdentityPoolId = "example-pool", + /// }); + /// + /// var example = new Gcp.Iam.WorkloadIdentityPoolProvider("example", new() + /// { + /// WorkloadIdentityPoolId = pool.WorkloadIdentityPoolId, + /// WorkloadIdentityPoolProviderId = "example-prvdr", + /// DisplayName = "Name of provider", + /// Description = "X.509 identity pool provider for automated test", + /// Disabled = true, + /// AttributeMapping = + /// { + /// { "google.subject", "assertion.subject.dn.cn" }, + /// }, + /// X509 = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509Args + /// { + /// TrustStore = new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreArgs + /// { + /// TrustAnchors = new[] + /// { + /// new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs + /// { + /// PemCertificate = Std.File.Invoke(new() + /// { + /// Input = "test-fixtures/trust_anchor.pem", + /// }).Apply(invoke => invoke.Result), + /// }, + /// }, + /// IntermediateCas = new[] + /// { + /// new Gcp.Iam.Inputs.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs + /// { + /// PemCertificate = Std.File.Invoke(new() + /// { + /// Input = "test-fixtures/intermediate_ca.pem", + /// }).Apply(invoke => invoke.Result), + /// }, + /// }, + /// }, + /// }, + /// }); + /// + /// }); + /// ``` /// /// ## Import /// @@ -464,6 +565,14 @@ public partial class WorkloadIdentityPoolProvider : global::Pulumi.CustomResourc [Output("workloadIdentityPoolProviderId")] public Output WorkloadIdentityPoolProviderId { get; private set; } = null!; + /// + /// An X.509-type identity provider represents a CA. It is trusted to assert a + /// client identity if the client has a certificate that chains up to this CA. + /// Structure is documented below. + /// + [Output("x509")] + public Output X509 { get; private set; } = null!; + /// /// Create a WorkloadIdentityPoolProvider resource with the given unique name, arguments, and options. @@ -648,6 +757,14 @@ public InputMap AttributeMapping [Input("workloadIdentityPoolProviderId", required: true)] public Input WorkloadIdentityPoolProviderId { get; set; } = null!; + /// + /// An X.509-type identity provider represents a CA. It is trusted to assert a + /// client identity if the client has a certificate that chains up to this CA. + /// Structure is documented below. + /// + [Input("x509")] + public Input? X509 { get; set; } + public WorkloadIdentityPoolProviderArgs() { } @@ -813,6 +930,14 @@ public InputMap AttributeMapping [Input("workloadIdentityPoolProviderId")] public Input? WorkloadIdentityPoolProviderId { get; set; } + /// + /// An X.509-type identity provider represents a CA. It is trusted to assert a + /// client identity if the client has a certificate that chains up to this CA. + /// Structure is documented below. + /// + [Input("x509")] + public Input? 
X509 { get; set; } + public WorkloadIdentityPoolProviderState() { } diff --git a/sdk/dotnet/Kms/AutokeyConfig.cs b/sdk/dotnet/Kms/AutokeyConfig.cs index 226410e0b0..daf1e7fe91 100644 --- a/sdk/dotnet/Kms/AutokeyConfig.cs +++ b/sdk/dotnet/Kms/AutokeyConfig.cs @@ -127,7 +127,7 @@ namespace Pulumi.Gcp.Kms /// /// var example_autokeyconfig = new Gcp.Kms.AutokeyConfig("example-autokeyconfig", new() /// { - /// Folder = autokmsFolder.FolderId, + /// Folder = autokmsFolder.Id, /// KeyProject = keyProject.ProjectId.Apply(projectId => $"projects/{projectId}"), /// }, new CustomResourceOptions /// { @@ -137,6 +137,19 @@ namespace Pulumi.Gcp.Kms /// }, /// }); /// + /// // Wait delay after setting AutokeyConfig, to prevent diffs on reapply, + /// // because setting the config takes a little to fully propagate. + /// var waitAutokeyPropagation = new Time.Index.Sleep("wait_autokey_propagation", new() + /// { + /// CreateDuration = "30s", + /// }, new CustomResourceOptions + /// { + /// DependsOn = + /// { + /// example_autokeyconfig, + /// }, + /// }); + /// /// }); /// ``` /// diff --git a/sdk/dotnet/Kms/GetCryptoKeyLatestVersion.cs b/sdk/dotnet/Kms/GetCryptoKeyLatestVersion.cs new file mode 100644 index 0000000000..0699a9e51b --- /dev/null +++ b/sdk/dotnet/Kms/GetCryptoKeyLatestVersion.cs @@ -0,0 +1,208 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Kms +{ + public static class GetCryptoKeyLatestVersion + { + /// + /// Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see + /// [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + /// and + /// [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var myKeyRing = Gcp.Kms.GetKMSKeyRing.Invoke(new() + /// { + /// Name = "my-key-ring", + /// Location = "us-central1", + /// }); + /// + /// var myCryptoKey = Gcp.Kms.GetKMSCryptoKey.Invoke(new() + /// { + /// Name = "my-crypto-key", + /// KeyRing = myKeyRing.Apply(getKMSKeyRingResult => getKMSKeyRingResult.Id), + /// }); + /// + /// var myCryptoKeyLatestVersion = Gcp.Kms.GetCryptoKeyLatestVersion.Invoke(new() + /// { + /// CryptoKey = myKey.Id, + /// }); + /// + /// }); + /// ``` + /// + public static Task InvokeAsync(GetCryptoKeyLatestVersionArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.InvokeAsync("gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion", args ?? new GetCryptoKeyLatestVersionArgs(), options.WithDefaults()); + + /// + /// Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see + /// [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + /// and + /// [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). 
+ /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var myKeyRing = Gcp.Kms.GetKMSKeyRing.Invoke(new() + /// { + /// Name = "my-key-ring", + /// Location = "us-central1", + /// }); + /// + /// var myCryptoKey = Gcp.Kms.GetKMSCryptoKey.Invoke(new() + /// { + /// Name = "my-crypto-key", + /// KeyRing = myKeyRing.Apply(getKMSKeyRingResult => getKMSKeyRingResult.Id), + /// }); + /// + /// var myCryptoKeyLatestVersion = Gcp.Kms.GetCryptoKeyLatestVersion.Invoke(new() + /// { + /// CryptoKey = myKey.Id, + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetCryptoKeyLatestVersionInvokeArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.Invoke("gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion", args ?? new GetCryptoKeyLatestVersionInvokeArgs(), options.WithDefaults()); + } + + + public sealed class GetCryptoKeyLatestVersionArgs : global::Pulumi.InvokeArgs + { + /// + /// The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + /// `gcp.kms.CryptoKey` resource/datasource. + /// + [Input("cryptoKey", required: true)] + public string CryptoKey { get; set; } = null!; + + /// + /// The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + /// + /// Example filter values if filtering on state. + /// + /// * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + /// + /// [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + /// + [Input("filter")] + public string? Filter { get; set; } + + public GetCryptoKeyLatestVersionArgs() + { + } + public static new GetCryptoKeyLatestVersionArgs Empty => new GetCryptoKeyLatestVersionArgs(); + } + + public sealed class GetCryptoKeyLatestVersionInvokeArgs : global::Pulumi.InvokeArgs + { + /// + /// The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + /// `gcp.kms.CryptoKey` resource/datasource. + /// + [Input("cryptoKey", required: true)] + public Input CryptoKey { get; set; } = null!; + + /// + /// The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + /// + /// Example filter values if filtering on state. + /// + /// * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + /// + /// [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + /// + [Input("filter")] + public Input? Filter { get; set; } + + public GetCryptoKeyLatestVersionInvokeArgs() + { + } + public static new GetCryptoKeyLatestVersionInvokeArgs Empty => new GetCryptoKeyLatestVersionInvokeArgs(); + } + + + [OutputType] + public sealed class GetCryptoKeyLatestVersionResult + { + /// + /// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + /// + public readonly string Algorithm; + public readonly string CryptoKey; + public readonly string? Filter; + /// + /// The provider-assigned unique ID for this managed resource. 
+ /// + public readonly string Id; + public readonly string Name; + /// + /// The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protection_level reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs. + /// + public readonly string ProtectionLevel; + /// + /// If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below. + /// + public readonly ImmutableArray PublicKeys; + /// + /// The current state of the latest CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs. + /// + public readonly string State; + public readonly int Version; + + [OutputConstructor] + private GetCryptoKeyLatestVersionResult( + string algorithm, + + string cryptoKey, + + string? filter, + + string id, + + string name, + + string protectionLevel, + + ImmutableArray publicKeys, + + string state, + + int version) + { + Algorithm = algorithm; + CryptoKey = cryptoKey; + Filter = filter; + Id = id; + Name = name; + ProtectionLevel = protectionLevel; + PublicKeys = publicKeys; + State = state; + Version = version; + } + } +} diff --git a/sdk/dotnet/Kms/GetCryptoKeyVersions.cs b/sdk/dotnet/Kms/GetCryptoKeyVersions.cs new file mode 100644 index 0000000000..b01c60249c --- /dev/null +++ b/sdk/dotnet/Kms/GetCryptoKeyVersions.cs @@ -0,0 +1,187 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Kms +{ + public static class GetCryptoKeyVersions + { + /// + /// Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see + /// [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + /// and + /// [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + /// + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var myKeyRing = Gcp.Kms.GetKMSKeyRing.Invoke(new() + /// { + /// Name = "my-key-ring", + /// Location = "us-central1", + /// }); + /// + /// var myCryptoKey = Gcp.Kms.GetKMSCryptoKey.Invoke(new() + /// { + /// Name = "my-crypto-key", + /// KeyRing = myKeyRing.Apply(getKMSKeyRingResult => getKMSKeyRingResult.Id), + /// }); + /// + /// var myCryptoKeyVersions = Gcp.Kms.GetCryptoKeyVersions.Invoke(new() + /// { + /// CryptoKey = myKey.Id, + /// }); + /// + /// }); + /// ``` + /// + public static Task InvokeAsync(GetCryptoKeyVersionsArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.InvokeAsync("gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions", args ?? new GetCryptoKeyVersionsArgs(), options.WithDefaults()); + + /// + /// Provides access to Google Cloud Platform KMS CryptoKeyVersions. 
For more information see + /// [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + /// and + /// [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + /// + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var myKeyRing = Gcp.Kms.GetKMSKeyRing.Invoke(new() + /// { + /// Name = "my-key-ring", + /// Location = "us-central1", + /// }); + /// + /// var myCryptoKey = Gcp.Kms.GetKMSCryptoKey.Invoke(new() + /// { + /// Name = "my-crypto-key", + /// KeyRing = myKeyRing.Apply(getKMSKeyRingResult => getKMSKeyRingResult.Id), + /// }); + /// + /// var myCryptoKeyVersions = Gcp.Kms.GetCryptoKeyVersions.Invoke(new() + /// { + /// CryptoKey = myKey.Id, + /// }); + /// + /// }); + /// ``` + /// + public static Output Invoke(GetCryptoKeyVersionsInvokeArgs args, InvokeOptions? options = null) + => global::Pulumi.Deployment.Instance.Invoke("gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions", args ?? new GetCryptoKeyVersionsInvokeArgs(), options.WithDefaults()); + } + + + public sealed class GetCryptoKeyVersionsArgs : global::Pulumi.InvokeArgs + { + /// + /// The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + /// `gcp.kms.CryptoKey` resource/datasource. + /// + [Input("cryptoKey", required: true)] + public string CryptoKey { get; set; } = null!; + + /// + /// The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + /// + /// Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + /// + /// * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + /// * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + /// + /// [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + /// + [Input("filter")] + public string? Filter { get; set; } + + public GetCryptoKeyVersionsArgs() + { + } + public static new GetCryptoKeyVersionsArgs Empty => new GetCryptoKeyVersionsArgs(); + } + + public sealed class GetCryptoKeyVersionsInvokeArgs : global::Pulumi.InvokeArgs + { + /// + /// The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + /// `gcp.kms.CryptoKey` resource/datasource. + /// + [Input("cryptoKey", required: true)] + public Input CryptoKey { get; set; } = null!; + + /// + /// The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + /// + /// Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + /// + /// * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. 
+ /// * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + /// + /// [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + /// + [Input("filter")] + public Input? Filter { get; set; } + + public GetCryptoKeyVersionsInvokeArgs() + { + } + public static new GetCryptoKeyVersionsInvokeArgs Empty => new GetCryptoKeyVersionsInvokeArgs(); + } + + + [OutputType] + public sealed class GetCryptoKeyVersionsResult + { + public readonly string CryptoKey; + public readonly string? Filter; + /// + /// The provider-assigned unique ID for this managed resource. + /// + public readonly string Id; + public readonly ImmutableArray PublicKeys; + /// + /// A list of all the retrieved crypto key versions from the provided crypto key. This list is influenced by the provided filter argument. + /// + public readonly ImmutableArray Versions; + + [OutputConstructor] + private GetCryptoKeyVersionsResult( + string cryptoKey, + + string? filter, + + string id, + + ImmutableArray publicKeys, + + ImmutableArray versions) + { + CryptoKey = cryptoKey; + Filter = filter; + Id = id; + PublicKeys = publicKeys; + Versions = versions; + } + } +} diff --git a/sdk/dotnet/Kms/Outputs/GetCryptoKeyLatestVersionPublicKeyResult.cs b/sdk/dotnet/Kms/Outputs/GetCryptoKeyLatestVersionPublicKeyResult.cs new file mode 100644 index 0000000000..75e3f261e0 --- /dev/null +++ b/sdk/dotnet/Kms/Outputs/GetCryptoKeyLatestVersionPublicKeyResult.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Kms.Outputs +{ + + [OutputType] + public sealed class GetCryptoKeyLatestVersionPublicKeyResult + { + /// + /// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + /// + public readonly string Algorithm; + /// + /// The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + /// + public readonly string Pem; + + [OutputConstructor] + private GetCryptoKeyLatestVersionPublicKeyResult( + string algorithm, + + string pem) + { + Algorithm = algorithm; + Pem = pem; + } + } +} diff --git a/sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsPublicKeyResult.cs b/sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsPublicKeyResult.cs new file mode 100644 index 0000000000..0f5eff0433 --- /dev/null +++ b/sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsPublicKeyResult.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Kms.Outputs +{ + + [OutputType] + public sealed class GetCryptoKeyVersionsPublicKeyResult + { + /// + /// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + /// + public readonly string Algorithm; + /// + /// The public key, encoded in PEM format. 
For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + /// + public readonly string Pem; + + [OutputConstructor] + private GetCryptoKeyVersionsPublicKeyResult( + string algorithm, + + string pem) + { + Algorithm = algorithm; + Pem = pem; + } + } +} diff --git a/sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsVersionPublicKeyResult.cs b/sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsVersionPublicKeyResult.cs new file mode 100644 index 0000000000..c08720214c --- /dev/null +++ b/sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsVersionPublicKeyResult.cs @@ -0,0 +1,35 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Kms.Outputs +{ + + [OutputType] + public sealed class GetCryptoKeyVersionsVersionPublicKeyResult + { + /// + /// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + /// + public readonly string Algorithm; + /// + /// The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + /// + public readonly string Pem; + + [OutputConstructor] + private GetCryptoKeyVersionsVersionPublicKeyResult( + string algorithm, + + string pem) + { + Algorithm = algorithm; + Pem = pem; + } + } +} diff --git a/sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsVersionResult.cs b/sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsVersionResult.cs new file mode 100644 index 0000000000..021c140281 --- /dev/null +++ b/sdk/dotnet/Kms/Outputs/GetCryptoKeyVersionsVersionResult.cs @@ -0,0 +1,60 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Kms.Outputs +{ + + [OutputType] + public sealed class GetCryptoKeyVersionsVersionResult + { + /// + /// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + /// + public readonly string Algorithm; + /// + /// The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + /// `gcp.kms.CryptoKey` resource/datasource. 
+ /// + public readonly string CryptoKey; + public readonly string Id; + public readonly string Name; + public readonly string ProtectionLevel; + public readonly ImmutableArray PublicKeys; + public readonly string State; + public readonly int Version; + + [OutputConstructor] + private GetCryptoKeyVersionsVersionResult( + string algorithm, + + string cryptoKey, + + string id, + + string name, + + string protectionLevel, + + ImmutableArray publicKeys, + + string state, + + int version) + { + Algorithm = algorithm; + CryptoKey = cryptoKey; + Id = id; + Name = name; + ProtectionLevel = protectionLevel; + PublicKeys = publicKeys; + State = state; + Version = version; + } + } +} diff --git a/sdk/dotnet/Netapp/ActiveDirectory.cs b/sdk/dotnet/Netapp/ActiveDirectory.cs index 1f278c6c48..9abb2ffbf8 100644 --- a/sdk/dotnet/Netapp/ActiveDirectory.cs +++ b/sdk/dotnet/Netapp/ActiveDirectory.cs @@ -12,7 +12,7 @@ namespace Pulumi.Gcp.Netapp /// /// ActiveDirectory is the public representation of the active directory config. /// - /// To get more information about activeDirectory, see: + /// To get more information about ActiveDirectory, see: /// /// * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.activeDirectories) /// * How-to Guides @@ -74,7 +74,7 @@ namespace Pulumi.Gcp.Netapp /// /// ## Import /// - /// activeDirectory can be imported using any of these accepted formats: + /// ActiveDirectory can be imported using any of these accepted formats: /// /// * `projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}` /// @@ -82,7 +82,7 @@ namespace Pulumi.Gcp.Netapp /// /// * `{{location}}/{{name}}` /// - /// When using the `pulumi import` command, activeDirectory can be imported using one of the formats above. For example: + /// When using the `pulumi import` command, ActiveDirectory can be imported using one of the formats above. For example: /// /// ```sh /// $ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default projects/{{project}}/locations/{{location}}/activeDirectories/{{name}} diff --git a/sdk/dotnet/Netapp/Backup.cs b/sdk/dotnet/Netapp/Backup.cs index 5676a7873b..13786dbd6f 100644 --- a/sdk/dotnet/Netapp/Backup.cs +++ b/sdk/dotnet/Netapp/Backup.cs @@ -25,7 +25,7 @@ namespace Pulumi.Gcp.Netapp /// from a volume or from an existing volume snapshot. Scheduled backups /// require a backup policy. /// - /// To get more information about backup, see: + /// To get more information about Backup, see: /// /// * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults.backups) /// * How-to Guides @@ -94,7 +94,7 @@ namespace Pulumi.Gcp.Netapp /// /// ## Import /// - /// backup can be imported using any of these accepted formats: + /// Backup can be imported using any of these accepted formats: /// /// * `projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}` /// @@ -102,7 +102,7 @@ namespace Pulumi.Gcp.Netapp /// /// * `{{location}}/{{vault_name}}/{{name}}` /// - /// When using the `pulumi import` command, backup can be imported using one of the formats above. For example: + /// When using the `pulumi import` command, Backup can be imported using one of the formats above. 
For example: /// /// ```sh /// $ pulumi import gcp:netapp/backup:Backup default projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}} diff --git a/sdk/dotnet/Netapp/BackupPolicy.cs b/sdk/dotnet/Netapp/BackupPolicy.cs index 089045dddf..db01576d2a 100644 --- a/sdk/dotnet/Netapp/BackupPolicy.cs +++ b/sdk/dotnet/Netapp/BackupPolicy.cs @@ -14,7 +14,7 @@ namespace Pulumi.Gcp.Netapp /// Backup policies allow you to attach a backup schedule to a volume. /// The policy defines how many backups to retain at daily, weekly, or monthly intervals. /// - /// To get more information about backupPolicy, see: + /// To get more information about BackupPolicy, see: /// /// * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupPolicies) /// * How-to Guides @@ -52,7 +52,7 @@ namespace Pulumi.Gcp.Netapp /// /// ## Import /// - /// backupPolicy can be imported using any of these accepted formats: + /// BackupPolicy can be imported using any of these accepted formats: /// /// * `projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}` /// @@ -60,7 +60,7 @@ namespace Pulumi.Gcp.Netapp /// /// * `{{location}}/{{name}}` /// - /// When using the `pulumi import` command, backupPolicy can be imported using one of the formats above. For example: + /// When using the `pulumi import` command, BackupPolicy can be imported using one of the formats above. For example: /// /// ```sh /// $ pulumi import gcp:netapp/backupPolicy:BackupPolicy default projects/{{project}}/locations/{{location}}/backupPolicies/{{name}} diff --git a/sdk/dotnet/Netapp/BackupVault.cs b/sdk/dotnet/Netapp/BackupVault.cs index 076e41f287..f6606647e0 100644 --- a/sdk/dotnet/Netapp/BackupVault.cs +++ b/sdk/dotnet/Netapp/BackupVault.cs @@ -13,7 +13,7 @@ namespace Pulumi.Gcp.Netapp /// A backup vault is the location where backups are stored. You can only create one backup vault per region. /// A vault can hold multiple backups for multiple volumes in that region. /// - /// To get more information about backupVault, see: + /// To get more information about BackupVault, see: /// /// * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults) /// * How-to Guides @@ -47,7 +47,7 @@ namespace Pulumi.Gcp.Netapp /// /// ## Import /// - /// backupVault can be imported using any of these accepted formats: + /// BackupVault can be imported using any of these accepted formats: /// /// * `projects/{{project}}/locations/{{location}}/backupVaults/{{name}}` /// @@ -55,7 +55,7 @@ namespace Pulumi.Gcp.Netapp /// /// * `{{location}}/{{name}}` /// - /// When using the `pulumi import` command, backupVault can be imported using one of the formats above. For example: + /// When using the `pulumi import` command, BackupVault can be imported using one of the formats above. 
For example: /// /// ```sh /// $ pulumi import gcp:netapp/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{name}} diff --git a/sdk/dotnet/Netapp/StoragePool.cs b/sdk/dotnet/Netapp/StoragePool.cs index 432575af4f..05090b9817 100644 --- a/sdk/dotnet/Netapp/StoragePool.cs +++ b/sdk/dotnet/Netapp/StoragePool.cs @@ -77,7 +77,7 @@ namespace Pulumi.Gcp.Netapp /// /// ## Import /// - /// storagePool can be imported using any of these accepted formats: + /// StoragePool can be imported using any of these accepted formats: /// /// * `projects/{{project}}/locations/{{location}}/storagePools/{{name}}` /// @@ -85,7 +85,7 @@ namespace Pulumi.Gcp.Netapp /// /// * `{{location}}/{{name}}` /// - /// When using the `pulumi import` command, storagePool can be imported using one of the formats above. For example: + /// When using the `pulumi import` command, StoragePool can be imported using one of the formats above. For example: /// /// ```sh /// $ pulumi import gcp:netapp/storagePool:StoragePool default projects/{{project}}/locations/{{location}}/storagePools/{{name}} diff --git a/sdk/dotnet/Netapp/Volume.cs b/sdk/dotnet/Netapp/Volume.cs index 8269126cbf..bb1c76473b 100644 --- a/sdk/dotnet/Netapp/Volume.cs +++ b/sdk/dotnet/Netapp/Volume.cs @@ -122,6 +122,7 @@ public partial class Volume : global::Pulumi.CustomResource /// Policy to determine if the volume should be deleted forcefully. /// Volumes may have nested snapshot resources. Deleting such a volume will fail. /// Setting this parameter to FORCE will delete volumes including nested snapshots. + /// Possible values: DEFAULT, FORCE. /// [Output("deletionPolicy")] public Output DeletionPolicy { get; private set; } = null!; @@ -404,6 +405,7 @@ public sealed class VolumeArgs : global::Pulumi.ResourceArgs /// Policy to determine if the volume should be deleted forcefully. /// Volumes may have nested snapshot resources. Deleting such a volume will fail. /// Setting this parameter to FORCE will delete volumes including nested snapshots. + /// Possible values: DEFAULT, FORCE. /// [Input("deletionPolicy")] public Input? DeletionPolicy { get; set; } @@ -587,6 +589,7 @@ public sealed class VolumeState : global::Pulumi.ResourceArgs /// Policy to determine if the volume should be deleted forcefully. /// Volumes may have nested snapshot resources. Deleting such a volume will fail. /// Setting this parameter to FORCE will delete volumes including nested snapshots. + /// Possible values: DEFAULT, FORCE. /// [Input("deletionPolicy")] public Input? DeletionPolicy { get; set; } diff --git a/sdk/dotnet/NetworkConnectivity/Inputs/SpokeLinkedVpcNetworkArgs.cs b/sdk/dotnet/NetworkConnectivity/Inputs/SpokeLinkedVpcNetworkArgs.cs index 52d4f747da..357d51326d 100644 --- a/sdk/dotnet/NetworkConnectivity/Inputs/SpokeLinkedVpcNetworkArgs.cs +++ b/sdk/dotnet/NetworkConnectivity/Inputs/SpokeLinkedVpcNetworkArgs.cs @@ -24,6 +24,18 @@ public InputList ExcludeExportRanges set => _excludeExportRanges = value; } + [Input("includeExportRanges")] + private InputList? _includeExportRanges; + + /// + /// IP ranges allowed to be included from peering. + /// + public InputList IncludeExportRanges + { + get => _includeExportRanges ?? (_includeExportRanges = new InputList()); + set => _includeExportRanges = value; + } + /// /// The URI of the VPC network resource. 
/// diff --git a/sdk/dotnet/NetworkConnectivity/Inputs/SpokeLinkedVpcNetworkGetArgs.cs b/sdk/dotnet/NetworkConnectivity/Inputs/SpokeLinkedVpcNetworkGetArgs.cs index 25d570d637..79909f6fc5 100644 --- a/sdk/dotnet/NetworkConnectivity/Inputs/SpokeLinkedVpcNetworkGetArgs.cs +++ b/sdk/dotnet/NetworkConnectivity/Inputs/SpokeLinkedVpcNetworkGetArgs.cs @@ -24,6 +24,18 @@ public InputList ExcludeExportRanges set => _excludeExportRanges = value; } + [Input("includeExportRanges")] + private InputList? _includeExportRanges; + + /// + /// IP ranges allowed to be included from peering. + /// + public InputList IncludeExportRanges + { + get => _includeExportRanges ?? (_includeExportRanges = new InputList()); + set => _includeExportRanges = value; + } + /// /// The URI of the VPC network resource. /// diff --git a/sdk/dotnet/NetworkConnectivity/Outputs/SpokeLinkedVpcNetwork.cs b/sdk/dotnet/NetworkConnectivity/Outputs/SpokeLinkedVpcNetwork.cs index 30701fb97f..e75f3bf667 100644 --- a/sdk/dotnet/NetworkConnectivity/Outputs/SpokeLinkedVpcNetwork.cs +++ b/sdk/dotnet/NetworkConnectivity/Outputs/SpokeLinkedVpcNetwork.cs @@ -18,6 +18,10 @@ public sealed class SpokeLinkedVpcNetwork /// public readonly ImmutableArray ExcludeExportRanges; /// + /// IP ranges allowed to be included from peering. + /// + public readonly ImmutableArray IncludeExportRanges; + /// /// The URI of the VPC network resource. /// public readonly string Uri; @@ -26,9 +30,12 @@ public sealed class SpokeLinkedVpcNetwork private SpokeLinkedVpcNetwork( ImmutableArray excludeExportRanges, + ImmutableArray includeExportRanges, + string uri) { ExcludeExportRanges = excludeExportRanges; + IncludeExportRanges = includeExportRanges; Uri = uri; } } diff --git a/sdk/dotnet/NetworkConnectivity/Spoke.cs b/sdk/dotnet/NetworkConnectivity/Spoke.cs index ca613dad59..70030ac111 100644 --- a/sdk/dotnet/NetworkConnectivity/Spoke.cs +++ b/sdk/dotnet/NetworkConnectivity/Spoke.cs @@ -63,6 +63,11 @@ namespace Pulumi.Gcp.NetworkConnectivity /// "198.51.100.0/24", /// "10.10.0.0/16", /// }, + /// IncludeExportRanges = new[] + /// { + /// "198.51.100.0/23", + /// "10.0.0.0/8", + /// }, /// Uri = network.SelfLink, /// }, /// }); diff --git a/sdk/dotnet/NetworkSecurity/ClientTlsPolicy.cs b/sdk/dotnet/NetworkSecurity/ClientTlsPolicy.cs index 68270dfb89..91c4200fdb 100644 --- a/sdk/dotnet/NetworkSecurity/ClientTlsPolicy.cs +++ b/sdk/dotnet/NetworkSecurity/ClientTlsPolicy.cs @@ -10,6 +10,14 @@ namespace Pulumi.Gcp.NetworkSecurity { /// + /// ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. 
+ /// + /// To get more information about ClientTlsPolicy, see: + /// + /// * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.clientTlsPolicies) + /// * How-to Guides + /// * [Service Security](https://cloud.google.com/traffic-director/docs/security-use-cases) + /// /// ## Example Usage /// /// ### Network Security Client Tls Policy Basic @@ -69,13 +77,6 @@ namespace Pulumi.Gcp.NetworkSecurity /// TargetUri = "unix:mypath", /// }, /// }, - /// new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyServerValidationCaArgs - /// { - /// GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ClientTlsPolicyServerValidationCaGrpcEndpointArgs - /// { - /// TargetUri = "unix:mypath1", - /// }, - /// }, /// }, /// }); /// diff --git a/sdk/dotnet/NetworkSecurity/ServerTlsPolicy.cs b/sdk/dotnet/NetworkSecurity/ServerTlsPolicy.cs index eeabff6e1f..de8c473c8d 100644 --- a/sdk/dotnet/NetworkSecurity/ServerTlsPolicy.cs +++ b/sdk/dotnet/NetworkSecurity/ServerTlsPolicy.cs @@ -10,6 +10,12 @@ namespace Pulumi.Gcp.NetworkSecurity { /// + /// ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. + /// + /// To get more information about ServerTlsPolicy, see: + /// + /// * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.serverTlsPolicies) + /// /// ## Example Usage /// /// ### Network Security Server Tls Policy Basic @@ -49,20 +55,6 @@ namespace Pulumi.Gcp.NetworkSecurity /// TargetUri = "unix:mypath", /// }, /// }, - /// new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaArgs - /// { - /// GrpcEndpoint = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs - /// { - /// TargetUri = "unix:abc/mypath", - /// }, - /// }, - /// new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaArgs - /// { - /// CertificateProviderInstance = new Gcp.NetworkSecurity.Inputs.ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs - /// { - /// PluginInstance = "google_cloud_private_spiffe", - /// }, - /// }, /// }, /// }, /// }); diff --git a/sdk/dotnet/Organizations/GetProject.cs b/sdk/dotnet/Organizations/GetProject.cs index 9ed6c0a2ce..78687aa5cb 100644 --- a/sdk/dotnet/Organizations/GetProject.cs +++ b/sdk/dotnet/Organizations/GetProject.cs @@ -117,6 +117,7 @@ public sealed class GetProjectResult public readonly string OrgId; public readonly string? ProjectId; public readonly ImmutableDictionary PulumiLabels; + public readonly ImmutableDictionary Tags; [OutputConstructor] private GetProjectResult( @@ -142,7 +143,9 @@ private GetProjectResult( string? 
projectId, - ImmutableDictionary pulumiLabels) + ImmutableDictionary pulumiLabels, + + ImmutableDictionary tags) { AutoCreateNetwork = autoCreateNetwork; BillingAccount = billingAccount; @@ -156,6 +159,7 @@ private GetProjectResult( OrgId = orgId; ProjectId = projectId; PulumiLabels = pulumiLabels; + Tags = tags; } } } diff --git a/sdk/dotnet/Organizations/Project.cs b/sdk/dotnet/Organizations/Project.cs index c196b00c9a..13138c9d36 100644 --- a/sdk/dotnet/Organizations/Project.cs +++ b/sdk/dotnet/Organizations/Project.cs @@ -22,6 +22,10 @@ namespace Pulumi.Gcp.Organizations /// /// > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. /// + /// > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. + /// + /// > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. + /// /// To get more information about projects, see: /// /// * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -74,6 +78,30 @@ namespace Pulumi.Gcp.Organizations /// }); /// ``` /// + /// To create a project with a tag + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var myProject = new Gcp.Organizations.Project("my_project", new() + /// { + /// Name = "My Project", + /// ProjectId = "your-project-id", + /// OrgId = "1234567", + /// Tags = + /// { + /// { "1234567/env", "staging" }, + /// }, + /// }); + /// + /// }); + /// ``` + /// /// ## Import /// /// Projects can be imported using the `project_id`, e.g. @@ -169,6 +197,12 @@ public partial class Project : global::Pulumi.CustomResource [Output("pulumiLabels")] public Output> PulumiLabels { get; private set; } = null!; + /// + /// A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + /// + [Output("tags")] + public Output?> Tags { get; private set; } = null!; + /// /// Create a Project resource with the given unique name, arguments, and options. @@ -288,6 +322,18 @@ public InputMap Labels [Input("projectId")] public Input? ProjectId { get; set; } + [Input("tags")] + private InputMap? _tags; + + /// + /// A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + /// + public InputMap Tags + { + get => _tags ?? (_tags = new InputMap()); + set => _tags = value; + } + public ProjectArgs() { } @@ -402,6 +448,18 @@ public InputMap PulumiLabels } } + [Input("tags")] + private InputMap? _tags; + + /// + /// A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. 
Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + /// + public InputMap Tags + { + get => _tags ?? (_tags = new InputMap()); + set => _tags = value; + } + public ProjectState() { } diff --git a/sdk/dotnet/ParallelStore/Instance.cs b/sdk/dotnet/ParallelStore/Instance.cs index ade9428fe1..3a54d96dc9 100644 --- a/sdk/dotnet/ParallelStore/Instance.cs +++ b/sdk/dotnet/ParallelStore/Instance.cs @@ -121,7 +121,7 @@ public partial class Instance : global::Pulumi.CustomResource public Output CreateTime { get; private set; } = null!; /// - /// The version of DAOS software running in the instance + /// The version of DAOS software running in the instance. /// [Output("daosVersion")] public Output DaosVersion { get; private set; } = null!; @@ -152,9 +152,9 @@ public partial class Instance : global::Pulumi.CustomResource public Output> EffectiveLabels { get; private set; } = null!; /// - /// Immutable. Contains the id of the allocated IP address range associated with the - /// private service access connection for example, "test-default" associated - /// with IP range 10.0.0.0/29. This field is populated by the service and + /// Immutable. Contains the id of the allocated IP address + /// range associated with the private service access connection for example, \"test-default\" + /// associated with IP range 10.0.0.0/29. This field is populated by the service /// and contains the value currently used by the service. /// [Output("effectiveReservedIpRange")] @@ -188,12 +188,12 @@ public partial class Instance : global::Pulumi.CustomResource public Output InstanceId { get; private set; } = null!; /// - /// Cloud Labels are a flexible and lightweight mechanism for organizing cloud - /// resources into groups that reflect a customer's organizational needs and - /// deployment strategies. Cloud Labels can be used to filter collections of - /// resources. They can be used to control how resource metrics are aggregated. - /// And they can be used as arguments to policy management rules (e.g. route, - /// firewall, load balancing, etc.). + /// Cloud Labels are a flexible and lightweight mechanism for + /// organizing cloud resources into groups that reflect a customer's organizational + /// needs and deployment strategies. Cloud Labels can be used to filter collections + /// of resources. They can be used to control how resource metrics are aggregated. + /// And they can be used as arguments to policy management rules (e.g. route, firewall, + /// load balancing, etc.). /// * Label keys must be between 1 and 63 characters long and must conform to /// the following regular expression: `a-z{0,62}`. /// * Label values must be between 0 and 63 characters long and must conform @@ -204,8 +204,9 @@ public partial class Instance : global::Pulumi.CustomResource /// characters may be allowed in the future. Therefore, you are advised to use /// an internal label representation, such as JSON, which doesn't rely upon /// specific characters being disallowed. For example, representing labels - /// as the string: name + "_" + value would prove problematic if we were to - /// allow "_" in a future release. + /// as the string: `name + "_" + value` would prove problematic if we were to + /// allow `"_"` in a future release. " + /// /// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
/// Please refer to the field `effective_labels` for all of the labels present on the resource. /// @@ -226,9 +227,8 @@ public partial class Instance : global::Pulumi.CustomResource public Output Name { get; private set; } = null!; /// - /// Immutable. The name of the Google Compute Engine - /// [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - /// instance is connected. + /// Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + /// to which the instance is connected. /// [Output("network")] public Output Network { get; private set; } = null!; @@ -248,10 +248,10 @@ public partial class Instance : global::Pulumi.CustomResource public Output> PulumiLabels { get; private set; } = null!; /// - /// Immutable. Contains the id of the allocated IP address range associated with the - /// private service access connection for example, "test-default" associated - /// with IP range 10.0.0.0/29. If no range id is provided all ranges will be - /// considered. + /// Immutable. Contains the id of the allocated IP address range + /// associated with the private service access connection for example, \"test-default\" + /// associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + /// be considered. /// [Output("reservedIpRange")] public Output ReservedIpRange { get; private set; } = null!; @@ -382,12 +382,12 @@ public sealed class InstanceArgs : global::Pulumi.ResourceArgs private InputMap? _labels; /// - /// Cloud Labels are a flexible and lightweight mechanism for organizing cloud - /// resources into groups that reflect a customer's organizational needs and - /// deployment strategies. Cloud Labels can be used to filter collections of - /// resources. They can be used to control how resource metrics are aggregated. - /// And they can be used as arguments to policy management rules (e.g. route, - /// firewall, load balancing, etc.). + /// Cloud Labels are a flexible and lightweight mechanism for + /// organizing cloud resources into groups that reflect a customer's organizational + /// needs and deployment strategies. Cloud Labels can be used to filter collections + /// of resources. They can be used to control how resource metrics are aggregated. + /// And they can be used as arguments to policy management rules (e.g. route, firewall, + /// load balancing, etc.). /// * Label keys must be between 1 and 63 characters long and must conform to /// the following regular expression: `a-z{0,62}`. /// * Label values must be between 0 and 63 characters long and must conform @@ -398,8 +398,9 @@ public sealed class InstanceArgs : global::Pulumi.ResourceArgs /// characters may be allowed in the future. Therefore, you are advised to use /// an internal label representation, such as JSON, which doesn't rely upon /// specific characters being disallowed. For example, representing labels - /// as the string: name + "_" + value would prove problematic if we were to - /// allow "_" in a future release. + /// as the string: `name + "_" + value` would prove problematic if we were to + /// allow `"_"` in a future release. " + /// /// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. /// Please refer to the field `effective_labels` for all of the labels present on the resource. /// @@ -416,9 +417,8 @@ public InputMap Labels public Input Location { get; set; } = null!; /// - /// Immutable. 
The name of the Google Compute Engine - /// [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - /// instance is connected. + /// Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + /// to which the instance is connected. /// [Input("network")] public Input? Network { get; set; } @@ -431,10 +431,10 @@ public InputMap Labels public Input? Project { get; set; } /// - /// Immutable. Contains the id of the allocated IP address range associated with the - /// private service access connection for example, "test-default" associated - /// with IP range 10.0.0.0/29. If no range id is provided all ranges will be - /// considered. + /// Immutable. Contains the id of the allocated IP address range + /// associated with the private service access connection for example, \"test-default\" + /// associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + /// be considered. /// [Input("reservedIpRange")] public Input? ReservedIpRange { get; set; } @@ -473,7 +473,7 @@ public InputList AccessPoints public Input? CreateTime { get; set; } /// - /// The version of DAOS software running in the instance + /// The version of DAOS software running in the instance. /// [Input("daosVersion")] public Input? DaosVersion { get; set; } @@ -514,9 +514,9 @@ public InputMap EffectiveLabels } /// - /// Immutable. Contains the id of the allocated IP address range associated with the - /// private service access connection for example, "test-default" associated - /// with IP range 10.0.0.0/29. This field is populated by the service and + /// Immutable. Contains the id of the allocated IP address + /// range associated with the private service access connection for example, \"test-default\" + /// associated with IP range 10.0.0.0/29. This field is populated by the service /// and contains the value currently used by the service. /// [Input("effectiveReservedIpRange")] @@ -553,12 +553,12 @@ public InputMap EffectiveLabels private InputMap? _labels; /// - /// Cloud Labels are a flexible and lightweight mechanism for organizing cloud - /// resources into groups that reflect a customer's organizational needs and - /// deployment strategies. Cloud Labels can be used to filter collections of - /// resources. They can be used to control how resource metrics are aggregated. - /// And they can be used as arguments to policy management rules (e.g. route, - /// firewall, load balancing, etc.). + /// Cloud Labels are a flexible and lightweight mechanism for + /// organizing cloud resources into groups that reflect a customer's organizational + /// needs and deployment strategies. Cloud Labels can be used to filter collections + /// of resources. They can be used to control how resource metrics are aggregated. + /// And they can be used as arguments to policy management rules (e.g. route, firewall, + /// load balancing, etc.). /// * Label keys must be between 1 and 63 characters long and must conform to /// the following regular expression: `a-z{0,62}`. /// * Label values must be between 0 and 63 characters long and must conform @@ -569,8 +569,9 @@ public InputMap EffectiveLabels /// characters may be allowed in the future. Therefore, you are advised to use /// an internal label representation, such as JSON, which doesn't rely upon /// specific characters being disallowed. For example, representing labels - /// as the string: name + "_" + value would prove problematic if we were to - /// allow "_" in a future release. 
+ /// as the string: `name + "_" + value` would prove problematic if we were to + /// allow `"_"` in a future release. " + /// /// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. /// Please refer to the field `effective_labels` for all of the labels present on the resource. /// @@ -594,9 +595,8 @@ public InputMap Labels public Input? Name { get; set; } /// - /// Immutable. The name of the Google Compute Engine - /// [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - /// instance is connected. + /// Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + /// to which the instance is connected. /// [Input("network")] public Input? Network { get; set; } @@ -626,10 +626,10 @@ public InputMap PulumiLabels } /// - /// Immutable. Contains the id of the allocated IP address range associated with the - /// private service access connection for example, "test-default" associated - /// with IP range 10.0.0.0/29. If no range id is provided all ranges will be - /// considered. + /// Immutable. Contains the id of the allocated IP address range + /// associated with the private service access connection for example, \"test-default\" + /// associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + /// be considered. /// [Input("reservedIpRange")] public Input? ReservedIpRange { get; set; } diff --git a/sdk/dotnet/Projects/IamMemberRemove.cs b/sdk/dotnet/Projects/IamMemberRemove.cs index c978a5d718..08f2c133e3 100644 --- a/sdk/dotnet/Projects/IamMemberRemove.cs +++ b/sdk/dotnet/Projects/IamMemberRemove.cs @@ -29,6 +29,28 @@ namespace Pulumi.Gcp.Projects /// [the official documentation](https://cloud.google.com/iam/docs/granting-changing-revoking-access) /// and /// [API reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). + /// + /// ## Example Usage + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var targetProject = Gcp.Organizations.GetProject.Invoke(); + /// + /// var foo = new Gcp.Projects.IamMemberRemove("foo", new() + /// { + /// Role = "roles/editor", + /// Project = targetProjectGoogleProject.ProjectId, + /// Member = $"serviceAccount:{targetProjectGoogleProject.Number}-compute@developer.gserviceaccount.com", + /// }); + /// + /// }); + /// ``` /// [GcpResourceType("gcp:projects/iamMemberRemove:IamMemberRemove")] public partial class IamMemberRemove : global::Pulumi.CustomResource diff --git a/sdk/dotnet/Projects/UsageExportBucket.cs b/sdk/dotnet/Projects/UsageExportBucket.cs index d3db9cbb32..0f763d9c7d 100644 --- a/sdk/dotnet/Projects/UsageExportBucket.cs +++ b/sdk/dotnet/Projects/UsageExportBucket.cs @@ -22,6 +22,10 @@ namespace Pulumi.Gcp.Projects /// /// > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. /// + /// > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. + /// + /// > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. 
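A note on the `Gcp.Projects.IamMemberRemove` example added above: the generated snippet declares `targetProject` but then references `targetProjectGoogleProject`, which is never defined in the program. A corrected sketch (illustrative only, not part of the generated patch) that wires the invoke result through `Apply` would look like:

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    // Look up the provider's default project.
    var targetProject = Gcp.Organizations.GetProject.Invoke();

    // Remove the default compute service account's editor binding from that project.
    var foo = new Gcp.Projects.IamMemberRemove("foo", new()
    {
        Role = "roles/editor",
        Project = targetProject.Apply(p => p.ProjectId),
        Member = targetProject.Apply(p => $"serviceAccount:{p.Number}-compute@developer.gserviceaccount.com"),
    });
});
```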
+ /// /// To get more information about projects, see: /// /// * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -74,6 +78,30 @@ namespace Pulumi.Gcp.Projects /// }); /// ``` /// + /// To create a project with a tag + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var myProject = new Gcp.Organizations.Project("my_project", new() + /// { + /// Name = "My Project", + /// ProjectId = "your-project-id", + /// OrgId = "1234567", + /// Tags = + /// { + /// { "1234567/env", "staging" }, + /// }, + /// }); + /// + /// }); + /// ``` + /// /// ## Import /// /// Projects can be imported using the `project_id`, e.g. diff --git a/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigArgs.cs b/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigArgs.cs index 21cffa5a66..fa105bd7b3 100644 --- a/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigArgs.cs +++ b/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigArgs.cs @@ -58,6 +58,12 @@ public sealed class SubscriptionCloudStorageConfigArgs : global::Pulumi.Resource [Input("maxDuration")] public Input? MaxDuration { get; set; } + /// + /// The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + /// + [Input("maxMessages")] + public Input? MaxMessages { get; set; } + /// /// The service account to use to write to Cloud Storage. If not specified, the Pub/Sub /// [service agent](https://cloud.google.com/iam/docs/service-agents), diff --git a/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigAvroConfigArgs.cs b/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigAvroConfigArgs.cs index 031021cbad..3c7ac089a8 100644 --- a/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigAvroConfigArgs.cs +++ b/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigAvroConfigArgs.cs @@ -12,6 +12,12 @@ namespace Pulumi.Gcp.PubSub.Inputs public sealed class SubscriptionCloudStorageConfigAvroConfigArgs : global::Pulumi.ResourceArgs { + /// + /// When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + /// + [Input("useTopicSchema")] + public Input? UseTopicSchema { get; set; } + /// /// When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. /// diff --git a/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigAvroConfigGetArgs.cs b/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigAvroConfigGetArgs.cs index cab6538713..a84fa2ff1d 100644 --- a/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigAvroConfigGetArgs.cs +++ b/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigAvroConfigGetArgs.cs @@ -12,6 +12,12 @@ namespace Pulumi.Gcp.PubSub.Inputs public sealed class SubscriptionCloudStorageConfigAvroConfigGetArgs : global::Pulumi.ResourceArgs { + /// + /// When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + /// + [Input("useTopicSchema")] + public Input? UseTopicSchema { get; set; } + /// /// When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. 
/// diff --git a/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigGetArgs.cs b/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigGetArgs.cs index 2caf4bfb6f..f8ebfa88f3 100644 --- a/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigGetArgs.cs +++ b/sdk/dotnet/PubSub/Inputs/SubscriptionCloudStorageConfigGetArgs.cs @@ -58,6 +58,12 @@ public sealed class SubscriptionCloudStorageConfigGetArgs : global::Pulumi.Resou [Input("maxDuration")] public Input? MaxDuration { get; set; } + /// + /// The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + /// + [Input("maxMessages")] + public Input? MaxMessages { get; set; } + /// /// The service account to use to write to Cloud Storage. If not specified, the Pub/Sub /// [service agent](https://cloud.google.com/iam/docs/service-agents), diff --git a/sdk/dotnet/PubSub/Outputs/GetSubscriptionCloudStorageConfigAvroConfigResult.cs b/sdk/dotnet/PubSub/Outputs/GetSubscriptionCloudStorageConfigAvroConfigResult.cs index 059eb17474..233b50e50b 100644 --- a/sdk/dotnet/PubSub/Outputs/GetSubscriptionCloudStorageConfigAvroConfigResult.cs +++ b/sdk/dotnet/PubSub/Outputs/GetSubscriptionCloudStorageConfigAvroConfigResult.cs @@ -13,14 +13,22 @@ namespace Pulumi.Gcp.PubSub.Outputs [OutputType] public sealed class GetSubscriptionCloudStorageConfigAvroConfigResult { + /// + /// When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + /// + public readonly bool UseTopicSchema; /// /// When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. /// public readonly bool WriteMetadata; [OutputConstructor] - private GetSubscriptionCloudStorageConfigAvroConfigResult(bool writeMetadata) + private GetSubscriptionCloudStorageConfigAvroConfigResult( + bool useTopicSchema, + + bool writeMetadata) { + UseTopicSchema = useTopicSchema; WriteMetadata = writeMetadata; } } diff --git a/sdk/dotnet/PubSub/Outputs/GetSubscriptionCloudStorageConfigResult.cs b/sdk/dotnet/PubSub/Outputs/GetSubscriptionCloudStorageConfigResult.cs index 7ff9772e3f..196bc1aa09 100644 --- a/sdk/dotnet/PubSub/Outputs/GetSubscriptionCloudStorageConfigResult.cs +++ b/sdk/dotnet/PubSub/Outputs/GetSubscriptionCloudStorageConfigResult.cs @@ -45,6 +45,10 @@ public sealed class GetSubscriptionCloudStorageConfigResult /// public readonly string MaxDuration; /// + /// The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + /// + public readonly int MaxMessages; + /// /// The service account to use to write to Cloud Storage. If not specified, the Pub/Sub /// [service agent](https://cloud.google.com/iam/docs/service-agents), /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. 
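The new `maxMessages` field on the Cloud Storage subscription config and `useTopicSchema` on its Avro config are exercised later in this patch inside the larger `Subscription.cs` doc examples; as a quick standalone reference, a minimal sketch (placeholder topic and bucket names; the bucket and the Pub/Sub service agent's IAM are assumed to already exist) could look like:

```csharp
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Gcp = Pulumi.Gcp;

return await Deployment.RunAsync(() =>
{
    var topic = new Gcp.PubSub.Topic("example-topic", new()
    {
        Name = "example-topic",
    });

    var subscription = new Gcp.PubSub.Subscription("example-subscription", new()
    {
        Name = "example-subscription",
        Topic = topic.Id,
        CloudStorageConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigArgs
        {
            Bucket = "example-bucket", // placeholder; must already exist with write access for the Pub/Sub service agent
            MaxDuration = "300s",
            MaxMessages = 1000,        // new in this upgrade: roll to a new file after 1000 messages (the documented minimum)
            AvroConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigAvroConfigArgs
            {
                WriteMetadata = true,
                UseTopicSchema = true, // new in this upgrade: serialize output files with the topic schema, if one exists
            },
        },
    });
});
```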
@@ -71,6 +75,8 @@ private GetSubscriptionCloudStorageConfigResult( string maxDuration, + int maxMessages, + string serviceAccountEmail, string state) @@ -82,6 +88,7 @@ private GetSubscriptionCloudStorageConfigResult( FilenameSuffix = filenameSuffix; MaxBytes = maxBytes; MaxDuration = maxDuration; + MaxMessages = maxMessages; ServiceAccountEmail = serviceAccountEmail; State = state; } diff --git a/sdk/dotnet/PubSub/Outputs/SubscriptionCloudStorageConfig.cs b/sdk/dotnet/PubSub/Outputs/SubscriptionCloudStorageConfig.cs index a53822f6d8..c6fe6e3e91 100644 --- a/sdk/dotnet/PubSub/Outputs/SubscriptionCloudStorageConfig.cs +++ b/sdk/dotnet/PubSub/Outputs/SubscriptionCloudStorageConfig.cs @@ -46,6 +46,10 @@ public sealed class SubscriptionCloudStorageConfig /// public readonly string? MaxDuration; /// + /// The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + /// + public readonly int? MaxMessages; + /// /// The service account to use to write to Cloud Storage. If not specified, the Pub/Sub /// [service agent](https://cloud.google.com/iam/docs/service-agents), /// service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -73,6 +77,8 @@ private SubscriptionCloudStorageConfig( string? maxDuration, + int? maxMessages, + string? serviceAccountEmail, string? state) @@ -84,6 +90,7 @@ private SubscriptionCloudStorageConfig( FilenameSuffix = filenameSuffix; MaxBytes = maxBytes; MaxDuration = maxDuration; + MaxMessages = maxMessages; ServiceAccountEmail = serviceAccountEmail; State = state; } diff --git a/sdk/dotnet/PubSub/Outputs/SubscriptionCloudStorageConfigAvroConfig.cs b/sdk/dotnet/PubSub/Outputs/SubscriptionCloudStorageConfigAvroConfig.cs index 4d997954fd..0b362d7380 100644 --- a/sdk/dotnet/PubSub/Outputs/SubscriptionCloudStorageConfigAvroConfig.cs +++ b/sdk/dotnet/PubSub/Outputs/SubscriptionCloudStorageConfigAvroConfig.cs @@ -13,14 +13,22 @@ namespace Pulumi.Gcp.PubSub.Outputs [OutputType] public sealed class SubscriptionCloudStorageConfigAvroConfig { + /// + /// When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + /// + public readonly bool? UseTopicSchema; /// /// When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. /// public readonly bool? WriteMetadata; [OutputConstructor] - private SubscriptionCloudStorageConfigAvroConfig(bool? writeMetadata) + private SubscriptionCloudStorageConfigAvroConfig( + bool? useTopicSchema, + + bool? 
writeMetadata) { + UseTopicSchema = useTopicSchema; WriteMetadata = writeMetadata; } } diff --git a/sdk/dotnet/PubSub/Subscription.cs b/sdk/dotnet/PubSub/Subscription.cs index e2471dec8c..efa2d29b64 100644 --- a/sdk/dotnet/PubSub/Subscription.cs +++ b/sdk/dotnet/PubSub/Subscription.cs @@ -415,6 +415,7 @@ namespace Pulumi.Gcp.PubSub /// FilenameDatetimeFormat = "YYYY-MM-DD/hh_mm_ssZ", /// MaxBytes = 1000, /// MaxDuration = "300s", + /// MaxMessages = 1000, /// }, /// }, new CustomResourceOptions /// { @@ -470,9 +471,11 @@ namespace Pulumi.Gcp.PubSub /// FilenameDatetimeFormat = "YYYY-MM-DD/hh_mm_ssZ", /// MaxBytes = 1000, /// MaxDuration = "300s", + /// MaxMessages = 1000, /// AvroConfig = new Gcp.PubSub.Inputs.SubscriptionCloudStorageConfigAvroConfigArgs /// { /// WriteMetadata = true, + /// UseTopicSchema = true, /// }, /// }, /// }, new CustomResourceOptions diff --git a/sdk/dotnet/Redis/Cluster.cs b/sdk/dotnet/Redis/Cluster.cs index 94381554b1..0f85c1715f 100644 --- a/sdk/dotnet/Redis/Cluster.cs +++ b/sdk/dotnet/Redis/Cluster.cs @@ -85,6 +85,23 @@ namespace Pulumi.Gcp.Redis /// { /// Mode = "MULTI_ZONE", /// }, + /// MaintenancePolicy = new Gcp.Redis.Inputs.ClusterMaintenancePolicyArgs + /// { + /// WeeklyMaintenanceWindows = new[] + /// { + /// new Gcp.Redis.Inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs + /// { + /// Day = "MONDAY", + /// StartTime = new Gcp.Redis.Inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs + /// { + /// Hours = 1, + /// Minutes = 0, + /// Seconds = 0, + /// Nanos = 0, + /// }, + /// }, + /// }, + /// }, /// }, new CustomResourceOptions /// { /// DependsOn = @@ -152,6 +169,23 @@ namespace Pulumi.Gcp.Redis /// Mode = "SINGLE_ZONE", /// Zone = "us-central1-f", /// }, + /// MaintenancePolicy = new Gcp.Redis.Inputs.ClusterMaintenancePolicyArgs + /// { + /// WeeklyMaintenanceWindows = new[] + /// { + /// new Gcp.Redis.Inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs + /// { + /// Day = "MONDAY", + /// StartTime = new Gcp.Redis.Inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs + /// { + /// Hours = 1, + /// Minutes = 0, + /// Seconds = 0, + /// Nanos = 0, + /// }, + /// }, + /// }, + /// }, /// DeletionProtectionEnabled = true, /// }, new CustomResourceOptions /// { @@ -229,6 +263,19 @@ public partial class Cluster : global::Pulumi.CustomResource [Output("discoveryEndpoints")] public Output> DiscoveryEndpoints { get; private set; } = null!; + /// + /// Maintenance policy for a cluster + /// + [Output("maintenancePolicy")] + public Output MaintenancePolicy { get; private set; } = null!; + + /// + /// Upcoming maintenance schedule. + /// Structure is documented below. + /// + [Output("maintenanceSchedules")] + public Output> MaintenanceSchedules { get; private set; } = null!; + /// /// Unique name of the resource in this scope including project and location using the form: /// projects/{projectId}/locations/{locationId}/clusters/{clusterId} @@ -394,6 +441,12 @@ public sealed class ClusterArgs : global::Pulumi.ResourceArgs [Input("deletionProtectionEnabled")] public Input? DeletionProtectionEnabled { get; set; } + /// + /// Maintenance policy for a cluster + /// + [Input("maintenancePolicy")] + public Input? 
MaintenancePolicy { get; set; } + /// /// Unique name of the resource in this scope including project and location using the form: /// projects/{projectId}/locations/{locationId}/clusters/{clusterId} @@ -518,6 +571,25 @@ public InputList DiscoveryEndpoints set => _discoveryEndpoints = value; } + /// + /// Maintenance policy for a cluster + /// + [Input("maintenancePolicy")] + public Input? MaintenancePolicy { get; set; } + + [Input("maintenanceSchedules")] + private InputList? _maintenanceSchedules; + + /// + /// Upcoming maintenance schedule. + /// Structure is documented below. + /// + public InputList MaintenanceSchedules + { + get => _maintenanceSchedules ?? (_maintenanceSchedules = new InputList()); + set => _maintenanceSchedules = value; + } + /// /// Unique name of the resource in this scope including project and location using the form: /// projects/{projectId}/locations/{locationId}/clusters/{clusterId} diff --git a/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyArgs.cs b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyArgs.cs new file mode 100644 index 0000000000..483f1b3501 --- /dev/null +++ b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyArgs.cs @@ -0,0 +1,53 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Inputs +{ + + public sealed class ClusterMaintenancePolicyArgs : global::Pulumi.ResourceArgs + { + /// + /// (Output) + /// Output only. The time when the policy was created. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("createTime")] + public Input? CreateTime { get; set; } + + /// + /// (Output) + /// Output only. The time when the policy was last updated. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("updateTime")] + public Input? UpdateTime { get; set; } + + [Input("weeklyMaintenanceWindows")] + private InputList? _weeklyMaintenanceWindows; + + /// + /// Optional. Maintenance window that is applied to resources covered by this policy. + /// Minimum 1. For the current version, the maximum number + /// of weekly_window is expected to be one. + /// Structure is documented below. + /// + public InputList WeeklyMaintenanceWindows + { + get => _weeklyMaintenanceWindows ?? (_weeklyMaintenanceWindows = new InputList()); + set => _weeklyMaintenanceWindows = value; + } + + public ClusterMaintenancePolicyArgs() + { + } + public static new ClusterMaintenancePolicyArgs Empty => new ClusterMaintenancePolicyArgs(); + } +} diff --git a/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyGetArgs.cs b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyGetArgs.cs new file mode 100644 index 0000000000..60554a48fd --- /dev/null +++ b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyGetArgs.cs @@ -0,0 +1,53 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Inputs +{ + + public sealed class ClusterMaintenancePolicyGetArgs : global::Pulumi.ResourceArgs + { + /// + /// (Output) + /// Output only. The time when the policy was created. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("createTime")] + public Input? CreateTime { get; set; } + + /// + /// (Output) + /// Output only. The time when the policy was last updated. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("updateTime")] + public Input? UpdateTime { get; set; } + + [Input("weeklyMaintenanceWindows")] + private InputList? _weeklyMaintenanceWindows; + + /// + /// Optional. Maintenance window that is applied to resources covered by this policy. + /// Minimum 1. For the current version, the maximum number + /// of weekly_window is expected to be one. + /// Structure is documented below. + /// + public InputList WeeklyMaintenanceWindows + { + get => _weeklyMaintenanceWindows ?? (_weeklyMaintenanceWindows = new InputList()); + set => _weeklyMaintenanceWindows = value; + } + + public ClusterMaintenancePolicyGetArgs() + { + } + public static new ClusterMaintenancePolicyGetArgs Empty => new ClusterMaintenancePolicyGetArgs(); + } +} diff --git a/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.cs b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.cs new file mode 100644 index 0000000000..b8807f6eea --- /dev/null +++ b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.cs @@ -0,0 +1,52 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Inputs +{ + + public sealed class ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs : global::Pulumi.ResourceArgs + { + /// + /// Required. The day of week that maintenance updates occur. + /// - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + /// - MONDAY: Monday + /// - TUESDAY: Tuesday + /// - WEDNESDAY: Wednesday + /// - THURSDAY: Thursday + /// - FRIDAY: Friday + /// - SATURDAY: Saturday + /// - SUNDAY: Sunday + /// Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + /// + [Input("day", required: true)] + public Input Day { get; set; } = null!; + + /// + /// (Output) + /// Output only. Duration of the maintenance window. + /// The current window is fixed at 1 hour. + /// A duration in seconds with up to nine fractional digits, + /// terminated by 's'. Example: "3.5s". + /// + [Input("duration")] + public Input? Duration { get; set; } + + /// + /// Required. Start time of the window in UTC time. + /// Structure is documented below. 
+ /// + [Input("startTime", required: true)] + public Input StartTime { get; set; } = null!; + + public ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs() + { + } + public static new ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs Empty => new ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs(); + } +} diff --git a/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowGetArgs.cs b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowGetArgs.cs new file mode 100644 index 0000000000..b3aba2d652 --- /dev/null +++ b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowGetArgs.cs @@ -0,0 +1,52 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Inputs +{ + + public sealed class ClusterMaintenancePolicyWeeklyMaintenanceWindowGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Required. The day of week that maintenance updates occur. + /// - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + /// - MONDAY: Monday + /// - TUESDAY: Tuesday + /// - WEDNESDAY: Wednesday + /// - THURSDAY: Thursday + /// - FRIDAY: Friday + /// - SATURDAY: Saturday + /// - SUNDAY: Sunday + /// Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + /// + [Input("day", required: true)] + public Input Day { get; set; } = null!; + + /// + /// (Output) + /// Output only. Duration of the maintenance window. + /// The current window is fixed at 1 hour. + /// A duration in seconds with up to nine fractional digits, + /// terminated by 's'. Example: "3.5s". + /// + [Input("duration")] + public Input? Duration { get; set; } + + /// + /// Required. Start time of the window in UTC time. + /// Structure is documented below. + /// + [Input("startTime", required: true)] + public Input StartTime { get; set; } = null!; + + public ClusterMaintenancePolicyWeeklyMaintenanceWindowGetArgs() + { + } + public static new ClusterMaintenancePolicyWeeklyMaintenanceWindowGetArgs Empty => new ClusterMaintenancePolicyWeeklyMaintenanceWindowGetArgs(); + } +} diff --git a/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.cs b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.cs new file mode 100644 index 0000000000..a72e1ba7a1 --- /dev/null +++ b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.cs @@ -0,0 +1,46 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Inputs +{ + + public sealed class ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs : global::Pulumi.ResourceArgs + { + /// + /// Hours of day in 24 hour format. Should be from 0 to 23. + /// An API may choose to allow the value "24:00:00" for scenarios like business closing time. + /// + [Input("hours")] + public Input? Hours { get; set; } + + /// + /// Minutes of hour of day. Must be from 0 to 59. 
+ /// + [Input("minutes")] + public Input? Minutes { get; set; } + + /// + /// Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + /// + [Input("nanos")] + public Input? Nanos { get; set; } + + /// + /// Seconds of minutes of the time. Must normally be from 0 to 59. + /// An API may allow the value 60 if it allows leap-seconds. + /// + [Input("seconds")] + public Input? Seconds { get; set; } + + public ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs() + { + } + public static new ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs Empty => new ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs(); + } +} diff --git a/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeGetArgs.cs b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeGetArgs.cs new file mode 100644 index 0000000000..8964df5fa2 --- /dev/null +++ b/sdk/dotnet/Redis/Inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeGetArgs.cs @@ -0,0 +1,46 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Inputs +{ + + public sealed class ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeGetArgs : global::Pulumi.ResourceArgs + { + /// + /// Hours of day in 24 hour format. Should be from 0 to 23. + /// An API may choose to allow the value "24:00:00" for scenarios like business closing time. + /// + [Input("hours")] + public Input? Hours { get; set; } + + /// + /// Minutes of hour of day. Must be from 0 to 59. + /// + [Input("minutes")] + public Input? Minutes { get; set; } + + /// + /// Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + /// + [Input("nanos")] + public Input? Nanos { get; set; } + + /// + /// Seconds of minutes of the time. Must normally be from 0 to 59. + /// An API may allow the value 60 if it allows leap-seconds. + /// + [Input("seconds")] + public Input? Seconds { get; set; } + + public ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeGetArgs() + { + } + public static new ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeGetArgs Empty => new ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeGetArgs(); + } +} diff --git a/sdk/dotnet/Redis/Inputs/ClusterMaintenanceScheduleArgs.cs b/sdk/dotnet/Redis/Inputs/ClusterMaintenanceScheduleArgs.cs new file mode 100644 index 0000000000..d852839409 --- /dev/null +++ b/sdk/dotnet/Redis/Inputs/ClusterMaintenanceScheduleArgs.cs @@ -0,0 +1,48 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Inputs +{ + + public sealed class ClusterMaintenanceScheduleArgs : global::Pulumi.ResourceArgs + { + /// + /// (Output) + /// Output only. The end time of any upcoming scheduled maintenance for this cluster. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("endTime")] + public Input? EndTime { get; set; } + + /// + /// (Output) + /// Output only. 
The deadline that the maintenance schedule start time + /// can not go beyond, including reschedule. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("scheduleDeadlineTime")] + public Input? ScheduleDeadlineTime { get; set; } + + /// + /// (Output) + /// Output only. The start time of any upcoming scheduled maintenance for this cluster. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("startTime")] + public Input? StartTime { get; set; } + + public ClusterMaintenanceScheduleArgs() + { + } + public static new ClusterMaintenanceScheduleArgs Empty => new ClusterMaintenanceScheduleArgs(); + } +} diff --git a/sdk/dotnet/Redis/Inputs/ClusterMaintenanceScheduleGetArgs.cs b/sdk/dotnet/Redis/Inputs/ClusterMaintenanceScheduleGetArgs.cs new file mode 100644 index 0000000000..57981df589 --- /dev/null +++ b/sdk/dotnet/Redis/Inputs/ClusterMaintenanceScheduleGetArgs.cs @@ -0,0 +1,48 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Inputs +{ + + public sealed class ClusterMaintenanceScheduleGetArgs : global::Pulumi.ResourceArgs + { + /// + /// (Output) + /// Output only. The end time of any upcoming scheduled maintenance for this cluster. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("endTime")] + public Input? EndTime { get; set; } + + /// + /// (Output) + /// Output only. The deadline that the maintenance schedule start time + /// can not go beyond, including reschedule. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("scheduleDeadlineTime")] + public Input? ScheduleDeadlineTime { get; set; } + + /// + /// (Output) + /// Output only. The start time of any upcoming scheduled maintenance for this cluster. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + [Input("startTime")] + public Input? StartTime { get; set; } + + public ClusterMaintenanceScheduleGetArgs() + { + } + public static new ClusterMaintenanceScheduleGetArgs Empty => new ClusterMaintenanceScheduleGetArgs(); + } +} diff --git a/sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicy.cs b/sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicy.cs new file mode 100644 index 0000000000..dde0a0d7e4 --- /dev/null +++ b/sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicy.cs @@ -0,0 +1,51 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Outputs +{ + + [OutputType] + public sealed class ClusterMaintenancePolicy + { + /// + /// (Output) + /// Output only. The time when the policy was created. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + public readonly string? 
CreateTime; + /// + /// (Output) + /// Output only. The time when the policy was last updated. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + public readonly string? UpdateTime; + /// + /// Optional. Maintenance window that is applied to resources covered by this policy. + /// Minimum 1. For the current version, the maximum number + /// of weekly_window is expected to be one. + /// Structure is documented below. + /// + public readonly ImmutableArray WeeklyMaintenanceWindows; + + [OutputConstructor] + private ClusterMaintenancePolicy( + string? createTime, + + string? updateTime, + + ImmutableArray weeklyMaintenanceWindows) + { + CreateTime = createTime; + UpdateTime = updateTime; + WeeklyMaintenanceWindows = weeklyMaintenanceWindows; + } + } +} diff --git a/sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindow.cs b/sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindow.cs new file mode 100644 index 0000000000..69c9ba433c --- /dev/null +++ b/sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindow.cs @@ -0,0 +1,56 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Outputs +{ + + [OutputType] + public sealed class ClusterMaintenancePolicyWeeklyMaintenanceWindow + { + /// + /// Required. The day of week that maintenance updates occur. + /// - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + /// - MONDAY: Monday + /// - TUESDAY: Tuesday + /// - WEDNESDAY: Wednesday + /// - THURSDAY: Thursday + /// - FRIDAY: Friday + /// - SATURDAY: Saturday + /// - SUNDAY: Sunday + /// Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + /// + public readonly string Day; + /// + /// (Output) + /// Output only. Duration of the maintenance window. + /// The current window is fixed at 1 hour. + /// A duration in seconds with up to nine fractional digits, + /// terminated by 's'. Example: "3.5s". + /// + public readonly string? Duration; + /// + /// Required. Start time of the window in UTC time. + /// Structure is documented below. + /// + public readonly Outputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime StartTime; + + [OutputConstructor] + private ClusterMaintenancePolicyWeeklyMaintenanceWindow( + string day, + + string? duration, + + Outputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime startTime) + { + Day = day; + Duration = duration; + StartTime = startTime; + } + } +} diff --git a/sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime.cs b/sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime.cs new file mode 100644 index 0000000000..224e720e0e --- /dev/null +++ b/sdk/dotnet/Redis/Outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime.cs @@ -0,0 +1,51 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Outputs +{ + + [OutputType] + public sealed class ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime + { + /// + /// Hours of day in 24 hour format. Should be from 0 to 23. + /// An API may choose to allow the value "24:00:00" for scenarios like business closing time. + /// + public readonly int? Hours; + /// + /// Minutes of hour of day. Must be from 0 to 59. + /// + public readonly int? Minutes; + /// + /// Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + /// + public readonly int? Nanos; + /// + /// Seconds of minutes of the time. Must normally be from 0 to 59. + /// An API may allow the value 60 if it allows leap-seconds. + /// + public readonly int? Seconds; + + [OutputConstructor] + private ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime( + int? hours, + + int? minutes, + + int? nanos, + + int? seconds) + { + Hours = hours; + Minutes = minutes; + Nanos = nanos; + Seconds = seconds; + } + } +} diff --git a/sdk/dotnet/Redis/Outputs/ClusterMaintenanceSchedule.cs b/sdk/dotnet/Redis/Outputs/ClusterMaintenanceSchedule.cs new file mode 100644 index 0000000000..2e36f200f6 --- /dev/null +++ b/sdk/dotnet/Redis/Outputs/ClusterMaintenanceSchedule.cs @@ -0,0 +1,52 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.Redis.Outputs +{ + + [OutputType] + public sealed class ClusterMaintenanceSchedule + { + /// + /// (Output) + /// Output only. The end time of any upcoming scheduled maintenance for this cluster. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + public readonly string? EndTime; + /// + /// (Output) + /// Output only. The deadline that the maintenance schedule start time + /// can not go beyond, including reschedule. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + public readonly string? ScheduleDeadlineTime; + /// + /// (Output) + /// Output only. The start time of any upcoming scheduled maintenance for this cluster. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + /// resolution and up to nine fractional digits. + /// + public readonly string? StartTime; + + [OutputConstructor] + private ClusterMaintenanceSchedule( + string? endTime, + + string? scheduleDeadlineTime, + + string? startTime) + { + EndTime = endTime; + ScheduleDeadlineTime = scheduleDeadlineTime; + StartTime = startTime; + } + } +} diff --git a/sdk/dotnet/SecurityCenter/V2FolderSccBigQueryExport.cs b/sdk/dotnet/SecurityCenter/V2FolderSccBigQueryExport.cs new file mode 100644 index 0000000000..0cc75730b3 --- /dev/null +++ b/sdk/dotnet/SecurityCenter/V2FolderSccBigQueryExport.cs @@ -0,0 +1,412 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.SecurityCenter +{ + /// + /// A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + /// It represents exporting Security Command Center data, including assets, findings, and security marks + /// using gcloud scc bqexports + /// > **Note:** In order to use Cloud SCC resources, your organization must be enrolled + /// in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + /// Without doing so, you may run into errors during resource creation. + /// + /// To get more information about FolderSccBigQueryExport, see: + /// + /// * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports) + /// * How-to Guides + /// * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) + /// + /// ## Example Usage + /// + /// ### Scc V2 Folder Big Query Export Config Basic + /// + /// ```csharp + /// using System.Collections.Generic; + /// using System.Linq; + /// using Pulumi; + /// using Gcp = Pulumi.Gcp; + /// + /// return await Deployment.RunAsync(() => + /// { + /// var folder = new Gcp.Organizations.Folder("folder", new() + /// { + /// Parent = "organizations/123456789", + /// DisplayName = "folder-name", + /// DeletionProtection = false, + /// }); + /// + /// var @default = new Gcp.BigQuery.Dataset("default", new() + /// { + /// DatasetId = "my_dataset_id", + /// FriendlyName = "test", + /// Description = "This is a test description", + /// Location = "US", + /// DefaultTableExpirationMs = 3600000, + /// DefaultPartitionExpirationMs = null, + /// Labels = + /// { + /// { "env", "default" }, + /// }, + /// }); + /// + /// var customBigQueryExportConfig = new Gcp.SecurityCenter.V2FolderSccBigQueryExport("custom_big_query_export_config", new() + /// { + /// BigQueryExportId = "my-export", + /// Folder = folder.FolderId, + /// Dataset = @default.Id, + /// Location = "global", + /// Description = "Cloud Security Command Center Findings Big Query Export Config", + /// Filter = "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", + /// }); + /// + /// }); + /// ``` + /// + /// ## Import + /// + /// FolderSccBigQueryExport can be imported using any of these accepted formats: + /// + /// * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` + /// + /// * `{{folder}}/{{location}}/{{big_query_export_id}}` + /// + /// When using the `pulumi import` command, FolderSccBigQueryExport can be imported using one of the formats above. For example: + /// + /// ```sh + /// $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} + /// ``` + /// + /// ```sh + /// $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default {{folder}}/{{location}}/{{big_query_export_id}} + /// ``` + /// + [GcpResourceType("gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport")] + public partial class V2FolderSccBigQueryExport : global::Pulumi.CustomResource + { + /// + /// This must be unique within the organization. 
It must consist of only lowercase letters, + /// numbers, and hyphens, must start with a letter, must end with either a letter or a number, + /// and must be 63 characters or less. + /// + /// + /// - - - + /// + [Output("bigQueryExportId")] + public Output BigQueryExportId { get; private set; } = null!; + + /// + /// The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + /// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + /// + [Output("createTime")] + public Output CreateTime { get; private set; } = null!; + + /// + /// The dataset to write findings' updates to. + /// Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + /// BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + /// + [Output("dataset")] + public Output Dataset { get; private set; } = null!; + + /// + /// The description of the notification config (max of 1024 characters). + /// + [Output("description")] + public Output Description { get; private set; } = null!; + + /// + /// Expression that defines the filter to apply across create/update + /// events of findings. The + /// expression is a list of zero or more restrictions combined via + /// logical operators AND and OR. Parentheses are supported, and OR + /// has higher precedence than AND. + /// Restrictions have the form <field> <operator> <value> and may have + /// a - character in front of them to indicate negation. The fields + /// map to those defined in the corresponding resource. + /// The supported operators are: + /// * = for all value types. + /// * >, <, >=, <= for integer values. + /// * :, meaning substring matching, for strings. + /// The supported value types are: + /// * string literals in quotes. + /// * integer literals without quotes. + /// * boolean literals true and false without quotes. + /// See + /// [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + /// for information on how to write a filter. + /// + [Output("filter")] + public Output Filter { get; private set; } = null!; + + /// + /// The folder where Cloud Security Command Center Big Query Export + /// Config lives in. + /// + [Output("folder")] + public Output Folder { get; private set; } = null!; + + /// + /// The BigQuery export configuration is stored in this location. If not provided, Use global as default. + /// + [Output("location")] + public Output Location { get; private set; } = null!; + + /// + /// Email address of the user who last edited the BigQuery export. + /// This field is set by the server and will be ignored if provided on export creation or update. + /// + [Output("mostRecentEditor")] + public Output MostRecentEditor { get; private set; } = null!; + + /// + /// The resource name of this export, in the format + /// `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + /// This field is provided in responses, and is ignored when provided in create requests. + /// + [Output("name")] + public Output Name { get; private set; } = null!; + + /// + /// The service account that needs permission to create table and upload data to the BigQuery dataset. 
+ /// + [Output("principal")] + public Output Principal { get; private set; } = null!; + + /// + /// The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + /// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + /// + [Output("updateTime")] + public Output UpdateTime { get; private set; } = null!; + + + /// + /// Create a V2FolderSccBigQueryExport resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public V2FolderSccBigQueryExport(string name, V2FolderSccBigQueryExportArgs args, CustomResourceOptions? options = null) + : base("gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport", name, args ?? new V2FolderSccBigQueryExportArgs(), MakeResourceOptions(options, "")) + { + } + + private V2FolderSccBigQueryExport(string name, Input id, V2FolderSccBigQueryExportState? state = null, CustomResourceOptions? options = null) + : base("gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport", name, state, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing V2FolderSccBigQueryExport resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// Any extra arguments used during the lookup. + /// A bag of options that control this resource's behavior + public static V2FolderSccBigQueryExport Get(string name, Input id, V2FolderSccBigQueryExportState? state = null, CustomResourceOptions? options = null) + { + return new V2FolderSccBigQueryExport(name, id, state, options); + } + } + + public sealed class V2FolderSccBigQueryExportArgs : global::Pulumi.ResourceArgs + { + /// + /// This must be unique within the organization. It must consist of only lowercase letters, + /// numbers, and hyphens, must start with a letter, must end with either a letter or a number, + /// and must be 63 characters or less. + /// + /// + /// - - - + /// + [Input("bigQueryExportId", required: true)] + public Input BigQueryExportId { get; set; } = null!; + + /// + /// The dataset to write findings' updates to. + /// Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + /// BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + /// + [Input("dataset")] + public Input? Dataset { get; set; } + + /// + /// The description of the notification config (max of 1024 characters). + /// + [Input("description")] + public Input? Description { get; set; } + + /// + /// Expression that defines the filter to apply across create/update + /// events of findings. The + /// expression is a list of zero or more restrictions combined via + /// logical operators AND and OR. 
Parentheses are supported, and OR + /// has higher precedence than AND. + /// Restrictions have the form <field> <operator> <value> and may have + /// a - character in front of them to indicate negation. The fields + /// map to those defined in the corresponding resource. + /// The supported operators are: + /// * = for all value types. + /// * >, <, >=, <= for integer values. + /// * :, meaning substring matching, for strings. + /// The supported value types are: + /// * string literals in quotes. + /// * integer literals without quotes. + /// * boolean literals true and false without quotes. + /// See + /// [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + /// for information on how to write a filter. + /// + [Input("filter")] + public Input? Filter { get; set; } + + /// + /// The folder where Cloud Security Command Center Big Query Export + /// Config lives in. + /// + [Input("folder", required: true)] + public Input Folder { get; set; } = null!; + + /// + /// The BigQuery export configuration is stored in this location. If not provided, Use global as default. + /// + [Input("location")] + public Input? Location { get; set; } + + public V2FolderSccBigQueryExportArgs() + { + } + public static new V2FolderSccBigQueryExportArgs Empty => new V2FolderSccBigQueryExportArgs(); + } + + public sealed class V2FolderSccBigQueryExportState : global::Pulumi.ResourceArgs + { + /// + /// This must be unique within the organization. It must consist of only lowercase letters, + /// numbers, and hyphens, must start with a letter, must end with either a letter or a number, + /// and must be 63 characters or less. + /// + /// + /// - - - + /// + [Input("bigQueryExportId")] + public Input? BigQueryExportId { get; set; } + + /// + /// The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + /// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + /// + [Input("createTime")] + public Input? CreateTime { get; set; } + + /// + /// The dataset to write findings' updates to. + /// Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + /// BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + /// + [Input("dataset")] + public Input? Dataset { get; set; } + + /// + /// The description of the notification config (max of 1024 characters). + /// + [Input("description")] + public Input? Description { get; set; } + + /// + /// Expression that defines the filter to apply across create/update + /// events of findings. The + /// expression is a list of zero or more restrictions combined via + /// logical operators AND and OR. Parentheses are supported, and OR + /// has higher precedence than AND. + /// Restrictions have the form <field> <operator> <value> and may have + /// a - character in front of them to indicate negation. The fields + /// map to those defined in the corresponding resource. + /// The supported operators are: + /// * = for all value types. + /// * >, <, >=, <= for integer values. + /// * :, meaning substring matching, for strings. + /// The supported value types are: + /// * string literals in quotes. + /// * integer literals without quotes. + /// * boolean literals true and false without quotes. 
+ /// See + /// [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + /// for information on how to write a filter. + /// + [Input("filter")] + public Input? Filter { get; set; } + + /// + /// The folder where Cloud Security Command Center Big Query Export + /// Config lives in. + /// + [Input("folder")] + public Input? Folder { get; set; } + + /// + /// The BigQuery export configuration is stored in this location. If not provided, Use global as default. + /// + [Input("location")] + public Input? Location { get; set; } + + /// + /// Email address of the user who last edited the BigQuery export. + /// This field is set by the server and will be ignored if provided on export creation or update. + /// + [Input("mostRecentEditor")] + public Input? MostRecentEditor { get; set; } + + /// + /// The resource name of this export, in the format + /// `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + /// This field is provided in responses, and is ignored when provided in create requests. + /// + [Input("name")] + public Input? Name { get; set; } + + /// + /// The service account that needs permission to create table and upload data to the BigQuery dataset. + /// + [Input("principal")] + public Input? Principal { get; set; } + + /// + /// The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + /// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + /// + [Input("updateTime")] + public Input? UpdateTime { get; set; } + + public V2FolderSccBigQueryExportState() + { + } + public static new V2FolderSccBigQueryExportState Empty => new V2FolderSccBigQueryExportState(); + } +} diff --git a/sdk/dotnet/SecurityCenter/V2OrganizationSccBigQueryExports.cs b/sdk/dotnet/SecurityCenter/V2OrganizationSccBigQueryExports.cs index bf00f4cff3..e89b3ffc60 100644 --- a/sdk/dotnet/SecurityCenter/V2OrganizationSccBigQueryExports.cs +++ b/sdk/dotnet/SecurityCenter/V2OrganizationSccBigQueryExports.cs @@ -37,7 +37,7 @@ namespace Pulumi.Gcp.SecurityCenter /// { /// var @default = new Gcp.BigQuery.Dataset("default", new() /// { - /// DatasetId = "my_dataset_id", + /// DatasetId = "", /// FriendlyName = "test", /// Description = "This is a test description", /// Location = "US", @@ -54,7 +54,7 @@ namespace Pulumi.Gcp.SecurityCenter /// Name = "my-export", /// BigQueryExportId = "my-export", /// Organization = "123456789", - /// Dataset = "my-dataset", + /// Dataset = @default.Id, /// Location = "global", /// Description = "Cloud Security Command Center Findings Big Query Export Config", /// Filter = "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", diff --git a/sdk/dotnet/SecurityCenter/V2ProjectSccBigQueryExport.cs b/sdk/dotnet/SecurityCenter/V2ProjectSccBigQueryExport.cs new file mode 100644 index 0000000000..f16f1668da --- /dev/null +++ b/sdk/dotnet/SecurityCenter/V2ProjectSccBigQueryExport.cs @@ -0,0 +1,368 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Threading.Tasks; +using Pulumi.Serialization; + +namespace Pulumi.Gcp.SecurityCenter +{ + /// + /// A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + /// It represents exporting Security Command Center data, including assets, findings, and security marks + /// using gcloud scc bqexports + /// > **Note:** In order to use Cloud SCC resources, your organization must be enrolled + /// in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + /// Without doing so, you may run into errors during resource creation. + /// + /// To get more information about ProjectSccBigQueryExport, see: + /// + /// * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.bigQueryExports) + /// * How-to Guides + /// * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) + /// + /// ## Example Usage + /// + /// ## Import + /// + /// ProjectSccBigQueryExport can be imported using any of these accepted formats: + /// + /// * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` + /// + /// * `{{project}}/{{location}}/{{big_query_export_id}}` + /// + /// * `{{location}}/{{big_query_export_id}}` + /// + /// When using the `pulumi import` command, ProjectSccBigQueryExport can be imported using one of the formats above. For example: + /// + /// ```sh + /// $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} + /// ``` + /// + /// ```sh + /// $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{project}}/{{location}}/{{big_query_export_id}} + /// ``` + /// + /// ```sh + /// $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{location}}/{{big_query_export_id}} + /// ``` + /// + [GcpResourceType("gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport")] + public partial class V2ProjectSccBigQueryExport : global::Pulumi.CustomResource + { + /// + /// This must be unique within the organization. + /// + /// + /// - - - + /// + [Output("bigQueryExportId")] + public Output BigQueryExportId { get; private set; } = null!; + + /// + /// The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + /// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + /// + [Output("createTime")] + public Output CreateTime { get; private set; } = null!; + + /// + /// The dataset to write findings' updates to. + /// Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + /// BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + /// + [Output("dataset")] + public Output Dataset { get; private set; } = null!; + + /// + /// The description of the notification config (max of 1024 characters). + /// + [Output("description")] + public Output Description { get; private set; } = null!; + + /// + /// Expression that defines the filter to apply across create/update + /// events of findings. 
The + /// expression is a list of zero or more restrictions combined via + /// logical operators AND and OR. Parentheses are supported, and OR + /// has higher precedence than AND. + /// Restrictions have the form <field> <operator> <value> and may have + /// a - character in front of them to indicate negation. The fields + /// map to those defined in the corresponding resource. + /// The supported operators are: + /// * = for all value types. + /// * >, <, >=, <= for integer values. + /// * :, meaning substring matching, for strings. + /// The supported value types are: + /// * string literals in quotes. + /// * integer literals without quotes. + /// * boolean literals true and false without quotes. + /// See + /// [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + /// for information on how to write a filter. + /// + [Output("filter")] + public Output Filter { get; private set; } = null!; + + /// + /// location Id is provided by organization. If not provided, Use global as default. + /// + [Output("location")] + public Output Location { get; private set; } = null!; + + /// + /// Email address of the user who last edited the BigQuery export. + /// This field is set by the server and will be ignored if provided on export creation or update. + /// + [Output("mostRecentEditor")] + public Output MostRecentEditor { get; private set; } = null!; + + /// + /// The resource name of this export, in the format + /// `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + /// This field is provided in responses, and is ignored when provided in create requests. + /// + [Output("name")] + public Output Name { get; private set; } = null!; + + /// + /// The service account that needs permission to create table and upload data to the BigQuery dataset. + /// + [Output("principal")] + public Output Principal { get; private set; } = null!; + + /// + /// The ID of the project in which the resource belongs. + /// If it is not provided, the provider project is used. + /// + [Output("project")] + public Output Project { get; private set; } = null!; + + /// + /// The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + /// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + /// + [Output("updateTime")] + public Output UpdateTime { get; private set; } = null!; + + + /// + /// Create a V2ProjectSccBigQueryExport resource with the given unique name, arguments, and options. + /// + /// + /// The unique name of the resource + /// The arguments used to populate this resource's properties + /// A bag of options that control this resource's behavior + public V2ProjectSccBigQueryExport(string name, V2ProjectSccBigQueryExportArgs args, CustomResourceOptions? options = null) + : base("gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport", name, args ?? new V2ProjectSccBigQueryExportArgs(), MakeResourceOptions(options, "")) + { + } + + private V2ProjectSccBigQueryExport(string name, Input id, V2ProjectSccBigQueryExportState? state = null, CustomResourceOptions? 
options = null) + : base("gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport", name, state, MakeResourceOptions(options, id)) + { + } + + private static CustomResourceOptions MakeResourceOptions(CustomResourceOptions? options, Input? id) + { + var defaultOptions = new CustomResourceOptions + { + Version = Utilities.Version, + }; + var merged = CustomResourceOptions.Merge(defaultOptions, options); + // Override the ID if one was specified for consistency with other language SDKs. + merged.Id = id ?? merged.Id; + return merged; + } + /// + /// Get an existing V2ProjectSccBigQueryExport resource's state with the given name, ID, and optional extra + /// properties used to qualify the lookup. + /// + /// + /// The unique name of the resulting resource. + /// The unique provider ID of the resource to lookup. + /// Any extra arguments used during the lookup. + /// A bag of options that control this resource's behavior + public static V2ProjectSccBigQueryExport Get(string name, Input id, V2ProjectSccBigQueryExportState? state = null, CustomResourceOptions? options = null) + { + return new V2ProjectSccBigQueryExport(name, id, state, options); + } + } + + public sealed class V2ProjectSccBigQueryExportArgs : global::Pulumi.ResourceArgs + { + /// + /// This must be unique within the organization. + /// + /// + /// - - - + /// + [Input("bigQueryExportId", required: true)] + public Input BigQueryExportId { get; set; } = null!; + + /// + /// The dataset to write findings' updates to. + /// Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + /// BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + /// + [Input("dataset")] + public Input? Dataset { get; set; } + + /// + /// The description of the notification config (max of 1024 characters). + /// + [Input("description")] + public Input? Description { get; set; } + + /// + /// Expression that defines the filter to apply across create/update + /// events of findings. The + /// expression is a list of zero or more restrictions combined via + /// logical operators AND and OR. Parentheses are supported, and OR + /// has higher precedence than AND. + /// Restrictions have the form <field> <operator> <value> and may have + /// a - character in front of them to indicate negation. The fields + /// map to those defined in the corresponding resource. + /// The supported operators are: + /// * = for all value types. + /// * >, <, >=, <= for integer values. + /// * :, meaning substring matching, for strings. + /// The supported value types are: + /// * string literals in quotes. + /// * integer literals without quotes. + /// * boolean literals true and false without quotes. + /// See + /// [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + /// for information on how to write a filter. + /// + [Input("filter")] + public Input? Filter { get; set; } + + /// + /// location Id is provided by organization. If not provided, Use global as default. + /// + [Input("location")] + public Input? Location { get; set; } + + /// + /// The ID of the project in which the resource belongs. + /// If it is not provided, the provider project is used. + /// + [Input("project")] + public Input? 
Project { get; set; } + + public V2ProjectSccBigQueryExportArgs() + { + } + public static new V2ProjectSccBigQueryExportArgs Empty => new V2ProjectSccBigQueryExportArgs(); + } + + public sealed class V2ProjectSccBigQueryExportState : global::Pulumi.ResourceArgs + { + /// + /// This must be unique within the organization. + /// + /// + /// - - - + /// + [Input("bigQueryExportId")] + public Input? BigQueryExportId { get; set; } + + /// + /// The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + /// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + /// + [Input("createTime")] + public Input? CreateTime { get; set; } + + /// + /// The dataset to write findings' updates to. + /// Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + /// BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + /// + [Input("dataset")] + public Input? Dataset { get; set; } + + /// + /// The description of the notification config (max of 1024 characters). + /// + [Input("description")] + public Input? Description { get; set; } + + /// + /// Expression that defines the filter to apply across create/update + /// events of findings. The + /// expression is a list of zero or more restrictions combined via + /// logical operators AND and OR. Parentheses are supported, and OR + /// has higher precedence than AND. + /// Restrictions have the form <field> <operator> <value> and may have + /// a - character in front of them to indicate negation. The fields + /// map to those defined in the corresponding resource. + /// The supported operators are: + /// * = for all value types. + /// * >, <, >=, <= for integer values. + /// * :, meaning substring matching, for strings. + /// The supported value types are: + /// * string literals in quotes. + /// * integer literals without quotes. + /// * boolean literals true and false without quotes. + /// See + /// [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + /// for information on how to write a filter. + /// + [Input("filter")] + public Input? Filter { get; set; } + + /// + /// location Id is provided by organization. If not provided, Use global as default. + /// + [Input("location")] + public Input? Location { get; set; } + + /// + /// Email address of the user who last edited the BigQuery export. + /// This field is set by the server and will be ignored if provided on export creation or update. + /// + [Input("mostRecentEditor")] + public Input? MostRecentEditor { get; set; } + + /// + /// The resource name of this export, in the format + /// `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + /// This field is provided in responses, and is ignored when provided in create requests. + /// + [Input("name")] + public Input? Name { get; set; } + + /// + /// The service account that needs permission to create table and upload data to the BigQuery dataset. + /// + [Input("principal")] + public Input? Principal { get; set; } + + /// + /// The ID of the project in which the resource belongs. + /// If it is not provided, the provider project is used. + /// + [Input("project")] + public Input? Project { get; set; } + + /// + /// The most recent time at which the BigQuery export was updated. 
This field is set by the server and will be ignored if provided on export creation or update. + /// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + /// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + /// + [Input("updateTime")] + public Input? UpdateTime { get; set; } + + public V2ProjectSccBigQueryExportState() + { + } + public static new V2ProjectSccBigQueryExportState Empty => new V2ProjectSccBigQueryExportState(); + } +} diff --git a/sdk/go.mod b/sdk/go.mod index 993351fdb0..40d716e5bd 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -4,6 +4,7 @@ go 1.21 require ( github.com/blang/semver v3.5.1+incompatible + github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 github.com/pulumi/pulumi/sdk/v3 v3.130.0 ) diff --git a/sdk/go.sum b/sdk/go.sum index 06ce7f4097..10756f3356 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -150,6 +150,8 @@ github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231 h1:vkHw5I/plNdTr435 github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231/go.mod h1:murToZ2N9hNJzewjHBgfFdXhZKjY3z5cYC1VXk+lbFE= github.com/pulumi/esc v0.9.1 h1:HH5eEv8sgyxSpY5a8yePyqFXzA8cvBvapfH8457+mIs= github.com/pulumi/esc v0.9.1/go.mod h1:oEJ6bOsjYlQUpjf70GiX+CXn3VBmpwFDxUTlmtUN84c= +github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0 h1:21oSj+TKlKTzQcxN9Hik7iSNNHPUQXN4s3itOnahy/w= +github.com/pulumi/pulumi-gcp/sdk/v7 v7.38.0/go.mod h1:YaEZms1NgXFqGhObKVofcAeWXu2V+3t/BAXdHQZq7fU= github.com/pulumi/pulumi/sdk/v3 v3.130.0 h1:gGJNd+akPqhZ+vrsZmAjSNJn6kGJkitjjkwrmIQMmn8= github.com/pulumi/pulumi/sdk/v3 v3.130.0/go.mod h1:p1U24en3zt51agx+WlNboSOV8eLlPWYAkxMzVEXKbnY= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= diff --git a/sdk/go/gcp/alloydb/cluster.go b/sdk/go/gcp/alloydb/cluster.go index d5bd0b8ac8..f250e2e702 100644 --- a/sdk/go/gcp/alloydb/cluster.go +++ b/sdk/go/gcp/alloydb/cluster.go @@ -289,6 +289,7 @@ type Cluster struct { // Policy to determine if the cluster should be deleted forcefully. // Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. // Deleting a Secondary cluster with a secondary instance REQUIRES setting deletionPolicy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + // Possible values: DEFAULT, FORCE DeletionPolicy pulumi.StringPtrOutput `pulumi:"deletionPolicy"` // User-settable and human-readable display name for the Cluster. DisplayName pulumi.StringPtrOutput `pulumi:"displayName"` @@ -350,6 +351,12 @@ type Cluster struct { SecondaryConfig ClusterSecondaryConfigPtrOutput `pulumi:"secondaryConfig"` // Output only. The current serving state of the cluster. State pulumi.StringOutput `pulumi:"state"` + // The subscrition type of cluster. + // Possible values are: `TRIAL`, `STANDARD`. + SubscriptionType pulumi.StringOutput `pulumi:"subscriptionType"` + // Contains information and all metadata related to TRIAL clusters. + // Structure is documented below. + TrialMetadatas ClusterTrialMetadataArrayOutput `pulumi:"trialMetadatas"` // The system-generated UID of the resource. Uid pulumi.StringOutput `pulumi:"uid"` } @@ -425,6 +432,7 @@ type clusterState struct { // Policy to determine if the cluster should be deleted forcefully. 
// Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. // Deleting a Secondary cluster with a secondary instance REQUIRES setting deletionPolicy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + // Possible values: DEFAULT, FORCE DeletionPolicy *string `pulumi:"deletionPolicy"` // User-settable and human-readable display name for the Cluster. DisplayName *string `pulumi:"displayName"` @@ -486,6 +494,12 @@ type clusterState struct { SecondaryConfig *ClusterSecondaryConfig `pulumi:"secondaryConfig"` // Output only. The current serving state of the cluster. State *string `pulumi:"state"` + // The subscrition type of cluster. + // Possible values are: `TRIAL`, `STANDARD`. + SubscriptionType *string `pulumi:"subscriptionType"` + // Contains information and all metadata related to TRIAL clusters. + // Structure is documented below. + TrialMetadatas []ClusterTrialMetadata `pulumi:"trialMetadatas"` // The system-generated UID of the resource. Uid *string `pulumi:"uid"` } @@ -521,6 +535,7 @@ type ClusterState struct { // Policy to determine if the cluster should be deleted forcefully. // Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. // Deleting a Secondary cluster with a secondary instance REQUIRES setting deletionPolicy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + // Possible values: DEFAULT, FORCE DeletionPolicy pulumi.StringPtrInput // User-settable and human-readable display name for the Cluster. DisplayName pulumi.StringPtrInput @@ -582,6 +597,12 @@ type ClusterState struct { SecondaryConfig ClusterSecondaryConfigPtrInput // Output only. The current serving state of the cluster. State pulumi.StringPtrInput + // The subscrition type of cluster. + // Possible values are: `TRIAL`, `STANDARD`. + SubscriptionType pulumi.StringPtrInput + // Contains information and all metadata related to TRIAL clusters. + // Structure is documented below. + TrialMetadatas ClusterTrialMetadataArrayInput // The system-generated UID of the resource. Uid pulumi.StringPtrInput } @@ -615,6 +636,7 @@ type clusterArgs struct { // Policy to determine if the cluster should be deleted forcefully. // Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. // Deleting a Secondary cluster with a secondary instance REQUIRES setting deletionPolicy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + // Possible values: DEFAULT, FORCE DeletionPolicy *string `pulumi:"deletionPolicy"` // User-settable and human-readable display name for the Cluster. DisplayName *string `pulumi:"displayName"` @@ -655,6 +677,9 @@ type clusterArgs struct { // Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY. // Structure is documented below. 
SecondaryConfig *ClusterSecondaryConfig `pulumi:"secondaryConfig"` + // The subscrition type of cluster. + // Possible values are: `TRIAL`, `STANDARD`. + SubscriptionType *string `pulumi:"subscriptionType"` } // The set of arguments for constructing a Cluster resource. @@ -683,6 +708,7 @@ type ClusterArgs struct { // Policy to determine if the cluster should be deleted forcefully. // Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. // Deleting a Secondary cluster with a secondary instance REQUIRES setting deletionPolicy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + // Possible values: DEFAULT, FORCE DeletionPolicy pulumi.StringPtrInput // User-settable and human-readable display name for the Cluster. DisplayName pulumi.StringPtrInput @@ -723,6 +749,9 @@ type ClusterArgs struct { // Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY. // Structure is documented below. SecondaryConfig ClusterSecondaryConfigPtrInput + // The subscrition type of cluster. + // Possible values are: `TRIAL`, `STANDARD`. + SubscriptionType pulumi.StringPtrInput } func (ClusterArgs) ElementType() reflect.Type { @@ -866,6 +895,7 @@ func (o ClusterOutput) DatabaseVersion() pulumi.StringOutput { // Policy to determine if the cluster should be deleted forcefully. // Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. // Deleting a Secondary cluster with a secondary instance REQUIRES setting deletionPolicy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. +// Possible values: DEFAULT, FORCE func (o ClusterOutput) DeletionPolicy() pulumi.StringPtrOutput { return o.ApplyT(func(v *Cluster) pulumi.StringPtrOutput { return v.DeletionPolicy }).(pulumi.StringPtrOutput) } @@ -993,6 +1023,18 @@ func (o ClusterOutput) State() pulumi.StringOutput { return o.ApplyT(func(v *Cluster) pulumi.StringOutput { return v.State }).(pulumi.StringOutput) } +// The subscrition type of cluster. +// Possible values are: `TRIAL`, `STANDARD`. +func (o ClusterOutput) SubscriptionType() pulumi.StringOutput { + return o.ApplyT(func(v *Cluster) pulumi.StringOutput { return v.SubscriptionType }).(pulumi.StringOutput) +} + +// Contains information and all metadata related to TRIAL clusters. +// Structure is documented below. +func (o ClusterOutput) TrialMetadatas() ClusterTrialMetadataArrayOutput { + return o.ApplyT(func(v *Cluster) ClusterTrialMetadataArrayOutput { return v.TrialMetadatas }).(ClusterTrialMetadataArrayOutput) +} + // The system-generated UID of the resource. 
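+
+// Minimal usage sketch for the new `subscriptionType` argument and the output-only
+// `trialMetadatas` field added above; the cluster ID, location, and network path below
+// are placeholder values, not provider defaults.
+//
+// ```go
+// package main
+//
+// import (
+//
+//	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/alloydb"
+//	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+//
+// )
+//
+// func main() {
+//	pulumi.Run(func(ctx *pulumi.Context) error {
+//		// Create a cluster on the TRIAL subscription type.
+//		trial, err := alloydb.NewCluster(ctx, "trial", &alloydb.ClusterArgs{
+//			ClusterId:        pulumi.String("trial-cluster"),
+//			Location:         pulumi.String("us-central1"),
+//			SubscriptionType: pulumi.String("TRIAL"),
+//			NetworkConfig: &alloydb.ClusterNetworkConfigArgs{
+//				Network: pulumi.String("projects/my-project/global/networks/default"),
+//			},
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		// trialMetadatas is output only; export the end time of the first entry.
+//		ctx.Export("trialEndTime", trial.TrialMetadatas.Index(pulumi.Int(0)).EndTime())
+//		return nil
+//	})
+// }
+// ```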
func (o ClusterOutput) Uid() pulumi.StringOutput { return o.ApplyT(func(v *Cluster) pulumi.StringOutput { return v.Uid }).(pulumi.StringOutput) diff --git a/sdk/go/gcp/alloydb/pulumiTypes.go b/sdk/go/gcp/alloydb/pulumiTypes.go index 774f717f15..0b9bc2ff1f 100644 --- a/sdk/go/gcp/alloydb/pulumiTypes.go +++ b/sdk/go/gcp/alloydb/pulumiTypes.go @@ -3648,6 +3648,130 @@ func (o ClusterSecondaryConfigPtrOutput) PrimaryClusterName() pulumi.StringPtrOu }).(pulumi.StringPtrOutput) } +type ClusterTrialMetadata struct { + // End time of the trial cluster. + EndTime *string `pulumi:"endTime"` + // Grace end time of the trial cluster. + GraceEndTime *string `pulumi:"graceEndTime"` + // Start time of the trial cluster. + StartTime *string `pulumi:"startTime"` + // Upgrade time of the trial cluster to standard cluster. + UpgradeTime *string `pulumi:"upgradeTime"` +} + +// ClusterTrialMetadataInput is an input type that accepts ClusterTrialMetadataArgs and ClusterTrialMetadataOutput values. +// You can construct a concrete instance of `ClusterTrialMetadataInput` via: +// +// ClusterTrialMetadataArgs{...} +type ClusterTrialMetadataInput interface { + pulumi.Input + + ToClusterTrialMetadataOutput() ClusterTrialMetadataOutput + ToClusterTrialMetadataOutputWithContext(context.Context) ClusterTrialMetadataOutput +} + +type ClusterTrialMetadataArgs struct { + // End time of the trial cluster. + EndTime pulumi.StringPtrInput `pulumi:"endTime"` + // Grace end time of the trial cluster. + GraceEndTime pulumi.StringPtrInput `pulumi:"graceEndTime"` + // Start time of the trial cluster. + StartTime pulumi.StringPtrInput `pulumi:"startTime"` + // Upgrade time of the trial cluster to standard cluster. + UpgradeTime pulumi.StringPtrInput `pulumi:"upgradeTime"` +} + +func (ClusterTrialMetadataArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterTrialMetadata)(nil)).Elem() +} + +func (i ClusterTrialMetadataArgs) ToClusterTrialMetadataOutput() ClusterTrialMetadataOutput { + return i.ToClusterTrialMetadataOutputWithContext(context.Background()) +} + +func (i ClusterTrialMetadataArgs) ToClusterTrialMetadataOutputWithContext(ctx context.Context) ClusterTrialMetadataOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterTrialMetadataOutput) +} + +// ClusterTrialMetadataArrayInput is an input type that accepts ClusterTrialMetadataArray and ClusterTrialMetadataArrayOutput values. 
+// You can construct a concrete instance of `ClusterTrialMetadataArrayInput` via: +// +// ClusterTrialMetadataArray{ ClusterTrialMetadataArgs{...} } +type ClusterTrialMetadataArrayInput interface { + pulumi.Input + + ToClusterTrialMetadataArrayOutput() ClusterTrialMetadataArrayOutput + ToClusterTrialMetadataArrayOutputWithContext(context.Context) ClusterTrialMetadataArrayOutput +} + +type ClusterTrialMetadataArray []ClusterTrialMetadataInput + +func (ClusterTrialMetadataArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]ClusterTrialMetadata)(nil)).Elem() +} + +func (i ClusterTrialMetadataArray) ToClusterTrialMetadataArrayOutput() ClusterTrialMetadataArrayOutput { + return i.ToClusterTrialMetadataArrayOutputWithContext(context.Background()) +} + +func (i ClusterTrialMetadataArray) ToClusterTrialMetadataArrayOutputWithContext(ctx context.Context) ClusterTrialMetadataArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterTrialMetadataArrayOutput) +} + +type ClusterTrialMetadataOutput struct{ *pulumi.OutputState } + +func (ClusterTrialMetadataOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterTrialMetadata)(nil)).Elem() +} + +func (o ClusterTrialMetadataOutput) ToClusterTrialMetadataOutput() ClusterTrialMetadataOutput { + return o +} + +func (o ClusterTrialMetadataOutput) ToClusterTrialMetadataOutputWithContext(ctx context.Context) ClusterTrialMetadataOutput { + return o +} + +// End time of the trial cluster. +func (o ClusterTrialMetadataOutput) EndTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterTrialMetadata) *string { return v.EndTime }).(pulumi.StringPtrOutput) +} + +// Grace end time of the trial cluster. +func (o ClusterTrialMetadataOutput) GraceEndTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterTrialMetadata) *string { return v.GraceEndTime }).(pulumi.StringPtrOutput) +} + +// Start time of the trial cluster. +func (o ClusterTrialMetadataOutput) StartTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterTrialMetadata) *string { return v.StartTime }).(pulumi.StringPtrOutput) +} + +// Upgrade time of the trial cluster to standard cluster. +func (o ClusterTrialMetadataOutput) UpgradeTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterTrialMetadata) *string { return v.UpgradeTime }).(pulumi.StringPtrOutput) +} + +type ClusterTrialMetadataArrayOutput struct{ *pulumi.OutputState } + +func (ClusterTrialMetadataArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]ClusterTrialMetadata)(nil)).Elem() +} + +func (o ClusterTrialMetadataArrayOutput) ToClusterTrialMetadataArrayOutput() ClusterTrialMetadataArrayOutput { + return o +} + +func (o ClusterTrialMetadataArrayOutput) ToClusterTrialMetadataArrayOutputWithContext(ctx context.Context) ClusterTrialMetadataArrayOutput { + return o +} + +func (o ClusterTrialMetadataArrayOutput) Index(i pulumi.IntInput) ClusterTrialMetadataOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) ClusterTrialMetadata { + return vs[0].([]ClusterTrialMetadata)[vs[1].(int)] + }).(ClusterTrialMetadataOutput) +} + type InstanceClientConnectionConfig struct { // Configuration to enforce connectors only (ex: AuthProxy) connections to the database. 
RequireConnectors *bool `pulumi:"requireConnectors"` @@ -5625,6 +5749,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*ClusterRestoreContinuousBackupSourcePtrInput)(nil)).Elem(), ClusterRestoreContinuousBackupSourceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterSecondaryConfigInput)(nil)).Elem(), ClusterSecondaryConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterSecondaryConfigPtrInput)(nil)).Elem(), ClusterSecondaryConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterTrialMetadataInput)(nil)).Elem(), ClusterTrialMetadataArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterTrialMetadataArrayInput)(nil)).Elem(), ClusterTrialMetadataArray{}) pulumi.RegisterInputType(reflect.TypeOf((*InstanceClientConnectionConfigInput)(nil)).Elem(), InstanceClientConnectionConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*InstanceClientConnectionConfigPtrInput)(nil)).Elem(), InstanceClientConnectionConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*InstanceClientConnectionConfigSslConfigInput)(nil)).Elem(), InstanceClientConnectionConfigSslConfigArgs{}) @@ -5700,6 +5826,8 @@ func init() { pulumi.RegisterOutputType(ClusterRestoreContinuousBackupSourcePtrOutput{}) pulumi.RegisterOutputType(ClusterSecondaryConfigOutput{}) pulumi.RegisterOutputType(ClusterSecondaryConfigPtrOutput{}) + pulumi.RegisterOutputType(ClusterTrialMetadataOutput{}) + pulumi.RegisterOutputType(ClusterTrialMetadataArrayOutput{}) pulumi.RegisterOutputType(InstanceClientConnectionConfigOutput{}) pulumi.RegisterOutputType(InstanceClientConnectionConfigPtrOutput{}) pulumi.RegisterOutputType(InstanceClientConnectionConfigSslConfigOutput{}) diff --git a/sdk/go/gcp/assuredworkloads/workload.go b/sdk/go/gcp/assuredworkloads/workload.go index af9a409cd9..23a68240fe 100644 --- a/sdk/go/gcp/assuredworkloads/workload.go +++ b/sdk/go/gcp/assuredworkloads/workload.go @@ -43,7 +43,7 @@ import ( // ProvisionedResourcesParent: pulumi.String("folders/519620126891"), // ResourceSettings: assuredworkloads.WorkloadResourceSettingArray{ // &assuredworkloads.WorkloadResourceSettingArgs{ -// DisplayName: pulumi.String("folder-display-name"), +// DisplayName: pulumi.String("{{name}}"), // ResourceType: pulumi.String("CONSUMER_FOLDER"), // }, // &assuredworkloads.WorkloadResourceSettingArgs{ @@ -116,6 +116,58 @@ import ( // } // // ``` +// ### Split_billing_partner_workload +// A Split billing partner test of the assuredworkloads api +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/assuredworkloads" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := assuredworkloads.NewWorkload(ctx, "primary", &assuredworkloads.WorkloadArgs{ +// ComplianceRegime: pulumi.String("ASSURED_WORKLOADS_FOR_PARTNERS"), +// DisplayName: pulumi.String("display"), +// Location: pulumi.String("europe-west8"), +// Organization: pulumi.String("123456789"), +// BillingAccount: pulumi.String("billingAccounts/000000-0000000-0000000-000000"), +// Partner: pulumi.String("SOVEREIGN_CONTROLS_BY_PSN"), +// PartnerPermissions: &assuredworkloads.WorkloadPartnerPermissionsArgs{ +// AssuredWorkloadsMonitoring: pulumi.Bool(true), +// DataLogsViewer: pulumi.Bool(true), +// ServiceAccessApprover: pulumi.Bool(true), +// }, +// PartnerServicesBillingAccount: pulumi.String("billingAccounts/01BF3F-2C6DE5-30C607"), +// ResourceSettings: assuredworkloads.WorkloadResourceSettingArray{ +// 
&assuredworkloads.WorkloadResourceSettingArgs{ +// ResourceType: pulumi.String("CONSUMER_FOLDER"), +// }, +// &assuredworkloads.WorkloadResourceSettingArgs{ +// ResourceType: pulumi.String("ENCRYPTION_KEYS_PROJECT"), +// }, +// &assuredworkloads.WorkloadResourceSettingArgs{ +// ResourceId: pulumi.String("ring"), +// ResourceType: pulumi.String("KEYRING"), +// }, +// }, +// ViolationNotificationsEnabled: pulumi.Bool(true), +// Labels: pulumi.StringMap{ +// "label-one": pulumi.String("value-one"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // // ## Import // @@ -139,7 +191,7 @@ type Workload struct { // Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. BillingAccount pulumi.StringPtrOutput `pulumi:"billingAccount"` - // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS ComplianceRegime pulumi.StringOutput `pulumi:"complianceRegime"` // Output only. Count of active Violations in the Workload. ComplianceStatuses WorkloadComplianceStatusArrayOutput `pulumi:"complianceStatuses"` @@ -172,10 +224,12 @@ type Workload struct { // // *** Organization pulumi.StringOutput `pulumi:"organization"` - // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM Partner pulumi.StringPtrOutput `pulumi:"partner"` // Optional. Permissions granted to the AW Partner SA account for the customer workload PartnerPermissions WorkloadPartnerPermissionsPtrOutput `pulumi:"partnerPermissions"` + // Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + PartnerServicesBillingAccount pulumi.StringPtrOutput `pulumi:"partnerServicesBillingAccount"` // Input only. 
The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} ProvisionedResourcesParent pulumi.StringPtrOutput `pulumi:"provisionedResourcesParent"` // The combination of labels configured directly on the resource and default labels configured on the provider. @@ -239,7 +293,7 @@ func GetWorkload(ctx *pulumi.Context, type workloadState struct { // Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. BillingAccount *string `pulumi:"billingAccount"` - // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS ComplianceRegime *string `pulumi:"complianceRegime"` // Output only. Count of active Violations in the Workload. ComplianceStatuses []WorkloadComplianceStatus `pulumi:"complianceStatuses"` @@ -272,10 +326,12 @@ type workloadState struct { // // *** Organization *string `pulumi:"organization"` - // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM Partner *string `pulumi:"partner"` // Optional. Permissions granted to the AW Partner SA account for the customer workload PartnerPermissions *WorkloadPartnerPermissions `pulumi:"partnerPermissions"` + // Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + PartnerServicesBillingAccount *string `pulumi:"partnerServicesBillingAccount"` // Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. 
Format: folders/{folder_id} ProvisionedResourcesParent *string `pulumi:"provisionedResourcesParent"` // The combination of labels configured directly on the resource and default labels configured on the provider. @@ -293,7 +349,7 @@ type workloadState struct { type WorkloadState struct { // Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. BillingAccount pulumi.StringPtrInput - // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS ComplianceRegime pulumi.StringPtrInput // Output only. Count of active Violations in the Workload. ComplianceStatuses WorkloadComplianceStatusArrayInput @@ -326,10 +382,12 @@ type WorkloadState struct { // // *** Organization pulumi.StringPtrInput - // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM Partner pulumi.StringPtrInput // Optional. Permissions granted to the AW Partner SA account for the customer workload PartnerPermissions WorkloadPartnerPermissionsPtrInput + // Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + PartnerServicesBillingAccount pulumi.StringPtrInput // Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} ProvisionedResourcesParent pulumi.StringPtrInput // The combination of labels configured directly on the resource and default labels configured on the provider. @@ -351,7 +409,7 @@ func (WorkloadState) ElementType() reflect.Type { type workloadArgs struct { // Optional. Input only. 
The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. BillingAccount *string `pulumi:"billingAccount"` - // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS ComplianceRegime string `pulumi:"complianceRegime"` // Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload DisplayName string `pulumi:"displayName"` @@ -370,10 +428,12 @@ type workloadArgs struct { // // *** Organization string `pulumi:"organization"` - // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM Partner *string `pulumi:"partner"` // Optional. Permissions granted to the AW Partner SA account for the customer workload PartnerPermissions *WorkloadPartnerPermissions `pulumi:"partnerPermissions"` + // Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + PartnerServicesBillingAccount *string `pulumi:"partnerServicesBillingAccount"` // Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} ProvisionedResourcesParent *string `pulumi:"provisionedResourcesParent"` // Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional. @@ -386,7 +446,7 @@ type workloadArgs struct { type WorkloadArgs struct { // Optional. Input only. 
The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. BillingAccount pulumi.StringPtrInput - // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + // Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS ComplianceRegime pulumi.StringInput // Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload DisplayName pulumi.StringInput @@ -405,10 +465,12 @@ type WorkloadArgs struct { // // *** Organization pulumi.StringInput - // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + // Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM Partner pulumi.StringPtrInput // Optional. Permissions granted to the AW Partner SA account for the customer workload PartnerPermissions WorkloadPartnerPermissionsPtrInput + // Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + PartnerServicesBillingAccount pulumi.StringPtrInput // Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} ProvisionedResourcesParent pulumi.StringPtrInput // Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional. @@ -509,7 +571,7 @@ func (o WorkloadOutput) BillingAccount() pulumi.StringPtrOutput { return o.ApplyT(func(v *Workload) pulumi.StringPtrOutput { return v.BillingAccount }).(pulumi.StringPtrOutput) } -// Required. Immutable. 
Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT +// Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS func (o WorkloadOutput) ComplianceRegime() pulumi.StringOutput { return o.ApplyT(func(v *Workload) pulumi.StringOutput { return v.ComplianceRegime }).(pulumi.StringOutput) } @@ -584,7 +646,7 @@ func (o WorkloadOutput) Organization() pulumi.StringOutput { return o.ApplyT(func(v *Workload) pulumi.StringOutput { return v.Organization }).(pulumi.StringOutput) } -// Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN +// Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM func (o WorkloadOutput) Partner() pulumi.StringPtrOutput { return o.ApplyT(func(v *Workload) pulumi.StringPtrOutput { return v.Partner }).(pulumi.StringPtrOutput) } @@ -594,6 +656,11 @@ func (o WorkloadOutput) PartnerPermissions() WorkloadPartnerPermissionsPtrOutput return o.ApplyT(func(v *Workload) WorkloadPartnerPermissionsPtrOutput { return v.PartnerPermissions }).(WorkloadPartnerPermissionsPtrOutput) } +// Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. +func (o WorkloadOutput) PartnerServicesBillingAccount() pulumi.StringPtrOutput { + return o.ApplyT(func(v *Workload) pulumi.StringPtrOutput { return v.PartnerServicesBillingAccount }).(pulumi.StringPtrOutput) +} + // Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} func (o WorkloadOutput) ProvisionedResourcesParent() pulumi.StringPtrOutput { return o.ApplyT(func(v *Workload) pulumi.StringPtrOutput { return v.ProvisionedResourcesParent }).(pulumi.StringPtrOutput) diff --git a/sdk/go/gcp/backupdisasterrecovery/backupVault.go b/sdk/go/gcp/backupdisasterrecovery/backupVault.go new file mode 100644 index 0000000000..3504ffbc77 --- /dev/null +++ b/sdk/go/gcp/backupdisasterrecovery/backupVault.go @@ -0,0 +1,689 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! 
*** + +package backupdisasterrecovery + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// ## Example Usage +// +// ### Backup Dr Backup Vault Full +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/backupdisasterrecovery" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := backupdisasterrecovery.NewBackupVault(ctx, "backup-vault-test", &backupdisasterrecovery.BackupVaultArgs{ +// Location: pulumi.String("us-central1"), +// BackupVaultId: pulumi.String("backup-vault-test"), +// Description: pulumi.String("This is a second backup vault built by Terraform."), +// BackupMinimumEnforcedRetentionDuration: pulumi.String("100000s"), +// Labels: pulumi.StringMap{ +// "foo": pulumi.String("bar1"), +// "bar": pulumi.String("baz1"), +// }, +// Annotations: pulumi.StringMap{ +// "annotations1": pulumi.String("bar1"), +// "annotations2": pulumi.String("baz1"), +// }, +// ForceUpdate: pulumi.Bool(true), +// ForceDelete: pulumi.Bool(true), +// AllowMissing: pulumi.Bool(true), +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ## Import +// +// BackupVault can be imported using any of these accepted formats: +// +// * `projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}` +// +// * `{{project}}/{{location}}/{{backup_vault_id}}` +// +// * `{{location}}/{{backup_vault_id}}` +// +// When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: +// +// ```sh +// $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}} +// ``` +// +// ```sh +// $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{project}}/{{location}}/{{backup_vault_id}} +// ``` +// +// ```sh +// $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{location}}/{{backup_vault_id}} +// ``` +type BackupVault struct { + pulumi.CustomResourceState + + // Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + AllowMissing pulumi.BoolPtrOutput `pulumi:"allowMissing"` + // Optional. User annotations. See https://google.aip.dev/128#annotations + // Stores small amounts of arbitrary data. + // **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + // Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. + Annotations pulumi.StringMapOutput `pulumi:"annotations"` + // Output only. The number of backups in this backup vault. + BackupCount pulumi.StringOutput `pulumi:"backupCount"` + // Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + BackupMinimumEnforcedRetentionDuration pulumi.StringOutput `pulumi:"backupMinimumEnforcedRetentionDuration"` + // Required. ID of the requesting object. + // + // *** + BackupVaultId pulumi.StringOutput `pulumi:"backupVaultId"` + // Output only. The time when the instance was created. + CreateTime pulumi.StringOutput `pulumi:"createTime"` + // Output only. Set to true when there are no backups nested under this resource. 
+ Deletable pulumi.BoolOutput `pulumi:"deletable"` + // Optional. The description of the BackupVault instance (2048 characters or less). + Description pulumi.StringPtrOutput `pulumi:"description"` + EffectiveAnnotations pulumi.StringMapOutput `pulumi:"effectiveAnnotations"` + // All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + EffectiveLabels pulumi.StringMapOutput `pulumi:"effectiveLabels"` + // Optional. Time after which the BackupVault resource is locked. + EffectiveTime pulumi.StringPtrOutput `pulumi:"effectiveTime"` + // Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwriting each other. + Etag pulumi.StringOutput `pulumi:"etag"` + // If set, the following restrictions against deletion of the backup vault instance can be overridden: + // * deletion of a backup vault instance containing no backups, but still containing empty datasources. + // * deletion of a backup vault instance that is being referenced by an active backup plan. + ForceDelete pulumi.BoolPtrOutput `pulumi:"forceDelete"` + // If set, allow update to extend the minimum enforced retention for backup vault. This overrides + // the restriction against conflicting retention periods. This conflict may occur when the + // expiration schedule defined by the associated backup plan is shorter than the minimum + // retention set by the backup vault. + ForceUpdate pulumi.BoolPtrOutput `pulumi:"forceUpdate"` + // Optional. Resource labels to represent user provided metadata. + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + Labels pulumi.StringMapOutput `pulumi:"labels"` + // The GCP location for the backup vault. + Location pulumi.StringOutput `pulumi:"location"` + // Output only. Identifier. The resource name. + Name pulumi.StringOutput `pulumi:"name"` + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project pulumi.StringOutput `pulumi:"project"` + // The combination of labels configured directly on the resource + // and default labels configured on the provider. + PulumiLabels pulumi.StringMapOutput `pulumi:"pulumiLabels"` + // Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + ServiceAccount pulumi.StringOutput `pulumi:"serviceAccount"` + // Output only. The BackupVault resource instance state. + // Possible values: + // STATE_UNSPECIFIED + // CREATING + // ACTIVE + // DELETING + // ERROR + State pulumi.StringOutput `pulumi:"state"` + // Output only. Total size of the storage used by all backup resources. + TotalStoredBytes pulumi.StringOutput `pulumi:"totalStoredBytes"` + // Output only. Immutable after resource creation until resource deletion. + Uid pulumi.StringOutput `pulumi:"uid"` + // Output only. The time when the instance was updated. + UpdateTime pulumi.StringOutput `pulumi:"updateTime"` +} + +// NewBackupVault registers a new resource with the given unique name, arguments, and options.
+func NewBackupVault(ctx *pulumi.Context, + name string, args *BackupVaultArgs, opts ...pulumi.ResourceOption) (*BackupVault, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.BackupMinimumEnforcedRetentionDuration == nil { + return nil, errors.New("invalid value for required argument 'BackupMinimumEnforcedRetentionDuration'") + } + if args.BackupVaultId == nil { + return nil, errors.New("invalid value for required argument 'BackupVaultId'") + } + if args.Location == nil { + return nil, errors.New("invalid value for required argument 'Location'") + } + secrets := pulumi.AdditionalSecretOutputs([]string{ + "effectiveLabels", + "pulumiLabels", + }) + opts = append(opts, secrets) + opts = internal.PkgResourceDefaultOpts(opts) + var resource BackupVault + err := ctx.RegisterResource("gcp:backupdisasterrecovery/backupVault:BackupVault", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetBackupVault gets an existing BackupVault resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetBackupVault(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *BackupVaultState, opts ...pulumi.ResourceOption) (*BackupVault, error) { + var resource BackupVault + err := ctx.ReadResource("gcp:backupdisasterrecovery/backupVault:BackupVault", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering BackupVault resources. +type backupVaultState struct { + // Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + AllowMissing *bool `pulumi:"allowMissing"` + // Optional. User annotations. See https://google.aip.dev/128#annotations + // Stores small amounts of arbitrary data. + // **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + // Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. + Annotations map[string]string `pulumi:"annotations"` + // Output only. The number of backups in this backup vault. + BackupCount *string `pulumi:"backupCount"` + // Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + BackupMinimumEnforcedRetentionDuration *string `pulumi:"backupMinimumEnforcedRetentionDuration"` + // Required. ID of the requesting object. + // + // *** + BackupVaultId *string `pulumi:"backupVaultId"` + // Output only. The time when the instance was created. + CreateTime *string `pulumi:"createTime"` + // Output only. Set to true when there are no backups nested under this resource. + Deletable *bool `pulumi:"deletable"` + // Optional. The description of the BackupVault instance (2048 characters or less). + Description *string `pulumi:"description"` + EffectiveAnnotations map[string]string `pulumi:"effectiveAnnotations"` + // All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + EffectiveLabels map[string]string `pulumi:"effectiveLabels"` + // Optional. Time after which the BackupVault resource is locked. + EffectiveTime *string `pulumi:"effectiveTime"` + // Optional. 
Server specified ETag for the backup vault resource to prevent simultaneous updates from overwriting each other. + Etag *string `pulumi:"etag"` + // If set, the following restrictions against deletion of the backup vault instance can be overridden: + // * deletion of a backup vault instance containing no backups, but still containing empty datasources. + // * deletion of a backup vault instance that is being referenced by an active backup plan. + ForceDelete *bool `pulumi:"forceDelete"` + // If set, allow update to extend the minimum enforced retention for backup vault. This overrides + // the restriction against conflicting retention periods. This conflict may occur when the + // expiration schedule defined by the associated backup plan is shorter than the minimum + // retention set by the backup vault. + ForceUpdate *bool `pulumi:"forceUpdate"` + // Optional. Resource labels to represent user provided metadata. + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + Labels map[string]string `pulumi:"labels"` + // The GCP location for the backup vault. + Location *string `pulumi:"location"` + // Output only. Identifier. The resource name. + Name *string `pulumi:"name"` + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project *string `pulumi:"project"` + // The combination of labels configured directly on the resource + // and default labels configured on the provider. + PulumiLabels map[string]string `pulumi:"pulumiLabels"` + // Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + ServiceAccount *string `pulumi:"serviceAccount"` + // Output only. The BackupVault resource instance state. + // Possible values: + // STATE_UNSPECIFIED + // CREATING + // ACTIVE + // DELETING + // ERROR + State *string `pulumi:"state"` + // Output only. Total size of the storage used by all backup resources. + TotalStoredBytes *string `pulumi:"totalStoredBytes"` + // Output only. Immutable after resource creation until resource deletion. + Uid *string `pulumi:"uid"` + // Output only. The time when the instance was updated. + UpdateTime *string `pulumi:"updateTime"` +} + +type BackupVaultState struct { + // Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + AllowMissing pulumi.BoolPtrInput + // Optional. User annotations. See https://google.aip.dev/128#annotations + // Stores small amounts of arbitrary data. + // **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + // Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. + Annotations pulumi.StringMapInput + // Output only. The number of backups in this backup vault. + BackupCount pulumi.StringPtrInput + // Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + BackupMinimumEnforcedRetentionDuration pulumi.StringPtrInput + // Required. ID of the requesting object. + // + // *** + BackupVaultId pulumi.StringPtrInput + // Output only. The time when the instance was created.
+ CreateTime pulumi.StringPtrInput + // Output only. Set to true when there are no backups nested under this resource. + Deletable pulumi.BoolPtrInput + // Optional. The description of the BackupVault instance (2048 characters or less). + Description pulumi.StringPtrInput + EffectiveAnnotations pulumi.StringMapInput + // All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + EffectiveLabels pulumi.StringMapInput + // Optional. Time after which the BackupVault resource is locked. + EffectiveTime pulumi.StringPtrInput + // Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwriting each other. + Etag pulumi.StringPtrInput + // If set, the following restrictions against deletion of the backup vault instance can be overridden: + // * deletion of a backup vault instance containing no backups, but still containing empty datasources. + // * deletion of a backup vault instance that is being referenced by an active backup plan. + ForceDelete pulumi.BoolPtrInput + // If set, allow update to extend the minimum enforced retention for backup vault. This overrides + // the restriction against conflicting retention periods. This conflict may occur when the + // expiration schedule defined by the associated backup plan is shorter than the minimum + // retention set by the backup vault. + ForceUpdate pulumi.BoolPtrInput + // Optional. Resource labels to represent user provided metadata. + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + Labels pulumi.StringMapInput + // The GCP location for the backup vault. + Location pulumi.StringPtrInput + // Output only. Identifier. The resource name. + Name pulumi.StringPtrInput + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project pulumi.StringPtrInput + // The combination of labels configured directly on the resource + // and default labels configured on the provider. + PulumiLabels pulumi.StringMapInput + // Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + ServiceAccount pulumi.StringPtrInput + // Output only. The BackupVault resource instance state. + // Possible values: + // STATE_UNSPECIFIED + // CREATING + // ACTIVE + // DELETING + // ERROR + State pulumi.StringPtrInput + // Output only. Total size of the storage used by all backup resources. + TotalStoredBytes pulumi.StringPtrInput + // Output only. Immutable after resource creation until resource deletion. + Uid pulumi.StringPtrInput + // Output only. The time when the instance was updated. + UpdateTime pulumi.StringPtrInput +} + +func (BackupVaultState) ElementType() reflect.Type { + return reflect.TypeOf((*backupVaultState)(nil)).Elem() +} + +type backupVaultArgs struct { + // Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + AllowMissing *bool `pulumi:"allowMissing"` + // Optional. User annotations. See https://google.aip.dev/128#annotations + // Stores small amounts of arbitrary data. + // **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration.
+ // Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. + Annotations map[string]string `pulumi:"annotations"` + // Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + BackupMinimumEnforcedRetentionDuration string `pulumi:"backupMinimumEnforcedRetentionDuration"` + // Required. ID of the requesting object. + // + // *** + BackupVaultId string `pulumi:"backupVaultId"` + // Optional. The description of the BackupVault instance (2048 characters or less). + Description *string `pulumi:"description"` + // Optional. Time after which the BackupVault resource is locked. + EffectiveTime *string `pulumi:"effectiveTime"` + // If set, the following restrictions against deletion of the backup vault instance can be overridden: + // * deletion of a backup vault instance containing no backups, but still containing empty datasources. + // * deletion of a backup vault instance that is being referenced by an active backup plan. + ForceDelete *bool `pulumi:"forceDelete"` + // If set, allow update to extend the minimum enforced retention for backup vault. This overrides + // the restriction against conflicting retention periods. This conflict may occur when the + // expiration schedule defined by the associated backup plan is shorter than the minimum + // retention set by the backup vault. + ForceUpdate *bool `pulumi:"forceUpdate"` + // Optional. Resource labels to represent user provided metadata. + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + Labels map[string]string `pulumi:"labels"` + // The GCP location for the backup vault. + Location string `pulumi:"location"` + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project *string `pulumi:"project"` +} + +// The set of arguments for constructing a BackupVault resource. +type BackupVaultArgs struct { + // Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + AllowMissing pulumi.BoolPtrInput + // Optional. User annotations. See https://google.aip.dev/128#annotations + // Stores small amounts of arbitrary data. + // **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + // Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. + Annotations pulumi.StringMapInput + // Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + BackupMinimumEnforcedRetentionDuration pulumi.StringInput + // Required. ID of the requesting object. + // + // *** + BackupVaultId pulumi.StringInput + // Optional. The description of the BackupVault instance (2048 characters or less). + Description pulumi.StringPtrInput + // Optional. Time after which the BackupVault resource is locked. + EffectiveTime pulumi.StringPtrInput + // If set, the following restrictions against deletion of the backup vault instance can be overridden: + // * deletion of a backup vault instance containing no backups, but still containing empty datasources. + // * deletion of a backup vault instance that is being referenced by an active backup plan. 
+ ForceDelete pulumi.BoolPtrInput + // If set, allow update to extend the minimum enforced retention for backup vault. This overrides + // the restriction against conflicting retention periods. This conflict may occur when the + // expiration schedule defined by the associated backup plan is shorter than the minimum + // retention set by the backup vault. + ForceUpdate pulumi.BoolPtrInput + // Optional. Resource labels to represent user provided metadata. + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + Labels pulumi.StringMapInput + // The GCP location for the backup vault. + Location pulumi.StringInput + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project pulumi.StringPtrInput +} + +func (BackupVaultArgs) ElementType() reflect.Type { + return reflect.TypeOf((*backupVaultArgs)(nil)).Elem() +} + +type BackupVaultInput interface { + pulumi.Input + + ToBackupVaultOutput() BackupVaultOutput + ToBackupVaultOutputWithContext(ctx context.Context) BackupVaultOutput +} + +func (*BackupVault) ElementType() reflect.Type { + return reflect.TypeOf((**BackupVault)(nil)).Elem() +} + +func (i *BackupVault) ToBackupVaultOutput() BackupVaultOutput { + return i.ToBackupVaultOutputWithContext(context.Background()) +} + +func (i *BackupVault) ToBackupVaultOutputWithContext(ctx context.Context) BackupVaultOutput { + return pulumi.ToOutputWithContext(ctx, i).(BackupVaultOutput) +} + +// BackupVaultArrayInput is an input type that accepts BackupVaultArray and BackupVaultArrayOutput values. +// You can construct a concrete instance of `BackupVaultArrayInput` via: +// +// BackupVaultArray{ BackupVaultArgs{...} } +type BackupVaultArrayInput interface { + pulumi.Input + + ToBackupVaultArrayOutput() BackupVaultArrayOutput + ToBackupVaultArrayOutputWithContext(context.Context) BackupVaultArrayOutput +} + +type BackupVaultArray []BackupVaultInput + +func (BackupVaultArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*BackupVault)(nil)).Elem() +} + +func (i BackupVaultArray) ToBackupVaultArrayOutput() BackupVaultArrayOutput { + return i.ToBackupVaultArrayOutputWithContext(context.Background()) +} + +func (i BackupVaultArray) ToBackupVaultArrayOutputWithContext(ctx context.Context) BackupVaultArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(BackupVaultArrayOutput) +} + +// BackupVaultMapInput is an input type that accepts BackupVaultMap and BackupVaultMapOutput values. 
+// You can construct a concrete instance of `BackupVaultMapInput` via: +// +// BackupVaultMap{ "key": BackupVaultArgs{...} } +type BackupVaultMapInput interface { + pulumi.Input + + ToBackupVaultMapOutput() BackupVaultMapOutput + ToBackupVaultMapOutputWithContext(context.Context) BackupVaultMapOutput +} + +type BackupVaultMap map[string]BackupVaultInput + +func (BackupVaultMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*BackupVault)(nil)).Elem() +} + +func (i BackupVaultMap) ToBackupVaultMapOutput() BackupVaultMapOutput { + return i.ToBackupVaultMapOutputWithContext(context.Background()) +} + +func (i BackupVaultMap) ToBackupVaultMapOutputWithContext(ctx context.Context) BackupVaultMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(BackupVaultMapOutput) +} + +type BackupVaultOutput struct{ *pulumi.OutputState } + +func (BackupVaultOutput) ElementType() reflect.Type { + return reflect.TypeOf((**BackupVault)(nil)).Elem() +} + +func (o BackupVaultOutput) ToBackupVaultOutput() BackupVaultOutput { + return o +} + +func (o BackupVaultOutput) ToBackupVaultOutputWithContext(ctx context.Context) BackupVaultOutput { + return o +} + +// Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. +func (o BackupVaultOutput) AllowMissing() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *BackupVault) pulumi.BoolPtrOutput { return v.AllowMissing }).(pulumi.BoolPtrOutput) +} + +// Optional. User annotations. See https://google.aip.dev/128#annotations +// Stores small amounts of arbitrary data. +// **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. +// Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. +func (o BackupVaultOutput) Annotations() pulumi.StringMapOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringMapOutput { return v.Annotations }).(pulumi.StringMapOutput) +} + +// Output only. The number of backups in this backup vault. +func (o BackupVaultOutput) BackupCount() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.BackupCount }).(pulumi.StringOutput) +} + +// Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. +func (o BackupVaultOutput) BackupMinimumEnforcedRetentionDuration() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.BackupMinimumEnforcedRetentionDuration }).(pulumi.StringOutput) +} + +// Required. ID of the requesting object. +// +// *** +func (o BackupVaultOutput) BackupVaultId() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.BackupVaultId }).(pulumi.StringOutput) +} + +// Output only. The time when the instance was created. +func (o BackupVaultOutput) CreateTime() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.CreateTime }).(pulumi.StringOutput) +} + +// Output only. Set to true when there are no backups nested under this resource. +func (o BackupVaultOutput) Deletable() pulumi.BoolOutput { + return o.ApplyT(func(v *BackupVault) pulumi.BoolOutput { return v.Deletable }).(pulumi.BoolOutput) +} + +// Optional. The description of the BackupVault instance (2048 characters or less). 
+func (o BackupVaultOutput) Description() pulumi.StringPtrOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringPtrOutput { return v.Description }).(pulumi.StringPtrOutput) +} + +func (o BackupVaultOutput) EffectiveAnnotations() pulumi.StringMapOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringMapOutput { return v.EffectiveAnnotations }).(pulumi.StringMapOutput) +} + +// All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. +func (o BackupVaultOutput) EffectiveLabels() pulumi.StringMapOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringMapOutput { return v.EffectiveLabels }).(pulumi.StringMapOutput) +} + +// Optional. Time after which the BackupVault resource is locked. +func (o BackupVaultOutput) EffectiveTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringPtrOutput { return v.EffectiveTime }).(pulumi.StringPtrOutput) +} + +// Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwriting each other. +func (o BackupVaultOutput) Etag() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.Etag }).(pulumi.StringOutput) +} + +// If set, the following restrictions against deletion of the backup vault instance can be overridden: +// * deletion of a backup vault instance containing no backups, but still containing empty datasources. +// * deletion of a backup vault instance that is being referenced by an active backup plan. +func (o BackupVaultOutput) ForceDelete() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *BackupVault) pulumi.BoolPtrOutput { return v.ForceDelete }).(pulumi.BoolPtrOutput) +} + +// If set, allow update to extend the minimum enforced retention for backup vault. This overrides +// the restriction against conflicting retention periods. This conflict may occur when the +// expiration schedule defined by the associated backup plan is shorter than the minimum +// retention set by the backup vault. +func (o BackupVaultOutput) ForceUpdate() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *BackupVault) pulumi.BoolPtrOutput { return v.ForceUpdate }).(pulumi.BoolPtrOutput) +} + +// Optional. Resource labels to represent user provided metadata. +// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +// Please refer to the field `effectiveLabels` for all of the labels present on the resource. +func (o BackupVaultOutput) Labels() pulumi.StringMapOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput) +} + +// The GCP location for the backup vault. +func (o BackupVaultOutput) Location() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.Location }).(pulumi.StringOutput) +} + +// Output only. Identifier. The resource name. +func (o BackupVaultOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) +} + +// The ID of the project in which the resource belongs. +// If it is not provided, the provider project is used. +func (o BackupVaultOutput) Project() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.Project }).(pulumi.StringOutput) +} + +// The combination of labels configured directly on the resource +// and default labels configured on the provider.
+func (o BackupVaultOutput) PulumiLabels() pulumi.StringMapOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringMapOutput { return v.PulumiLabels }).(pulumi.StringMapOutput) +} + +// Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. +func (o BackupVaultOutput) ServiceAccount() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.ServiceAccount }).(pulumi.StringOutput) +} + +// Output only. The BackupVault resource instance state. +// Possible values: +// STATE_UNSPECIFIED +// CREATING +// ACTIVE +// DELETING +// ERROR +func (o BackupVaultOutput) State() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.State }).(pulumi.StringOutput) +} + +// Output only. Total size of the storage used by all backup resources. +func (o BackupVaultOutput) TotalStoredBytes() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.TotalStoredBytes }).(pulumi.StringOutput) +} + +// Output only. Immutable after resource creation until resource deletion. +func (o BackupVaultOutput) Uid() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.Uid }).(pulumi.StringOutput) +} + +// Output only. The time when the instance was updated. +func (o BackupVaultOutput) UpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v *BackupVault) pulumi.StringOutput { return v.UpdateTime }).(pulumi.StringOutput) +} + +type BackupVaultArrayOutput struct{ *pulumi.OutputState } + +func (BackupVaultArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*BackupVault)(nil)).Elem() +} + +func (o BackupVaultArrayOutput) ToBackupVaultArrayOutput() BackupVaultArrayOutput { + return o +} + +func (o BackupVaultArrayOutput) ToBackupVaultArrayOutputWithContext(ctx context.Context) BackupVaultArrayOutput { + return o +} + +func (o BackupVaultArrayOutput) Index(i pulumi.IntInput) BackupVaultOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *BackupVault { + return vs[0].([]*BackupVault)[vs[1].(int)] + }).(BackupVaultOutput) +} + +type BackupVaultMapOutput struct{ *pulumi.OutputState } + +func (BackupVaultMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*BackupVault)(nil)).Elem() +} + +func (o BackupVaultMapOutput) ToBackupVaultMapOutput() BackupVaultMapOutput { + return o +} + +func (o BackupVaultMapOutput) ToBackupVaultMapOutputWithContext(ctx context.Context) BackupVaultMapOutput { + return o +} + +func (o BackupVaultMapOutput) MapIndex(k pulumi.StringInput) BackupVaultOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *BackupVault { + return vs[0].(map[string]*BackupVault)[vs[1].(string)] + }).(BackupVaultOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*BackupVaultInput)(nil)).Elem(), &BackupVault{}) + pulumi.RegisterInputType(reflect.TypeOf((*BackupVaultArrayInput)(nil)).Elem(), BackupVaultArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*BackupVaultMapInput)(nil)).Elem(), BackupVaultMap{}) + pulumi.RegisterOutputType(BackupVaultOutput{}) + pulumi.RegisterOutputType(BackupVaultArrayOutput{}) + pulumi.RegisterOutputType(BackupVaultMapOutput{}) +} diff --git a/sdk/go/gcp/backupdisasterrecovery/init.go b/sdk/go/gcp/backupdisasterrecovery/init.go index 8541af29f2..7706381cd0 100644 ---
a/sdk/go/gcp/backupdisasterrecovery/init.go +++ b/sdk/go/gcp/backupdisasterrecovery/init.go @@ -21,6 +21,8 @@ func (m *module) Version() semver.Version { func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi.Resource, err error) { switch typ { + case "gcp:backupdisasterrecovery/backupVault:BackupVault": + r = &BackupVault{} case "gcp:backupdisasterrecovery/managementServer:ManagementServer": r = &ManagementServer{} default: @@ -36,6 +38,11 @@ func init() { if err != nil { version = semver.Version{Major: 1} } + pulumi.RegisterResourceModule( + "gcp", + "backupdisasterrecovery/backupVault", + &module{version}, + ) pulumi.RegisterResourceModule( "gcp", "backupdisasterrecovery/managementServer", diff --git a/sdk/go/gcp/bigquery/dataTransferConfig.go b/sdk/go/gcp/bigquery/dataTransferConfig.go index 4add0923ab..4a19d01b95 100644 --- a/sdk/go/gcp/bigquery/dataTransferConfig.go +++ b/sdk/go/gcp/bigquery/dataTransferConfig.go @@ -86,6 +86,87 @@ import ( // } // // ``` +// ### Bigquerydatatransfer Config Cmek +// +// ```go +// package main +// +// import ( +// +// "fmt" +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// project, err := organizations.LookupProject(ctx, nil, nil) +// if err != nil { +// return err +// } +// permissions, err := projects.NewIAMMember(ctx, "permissions", &projects.IAMMemberArgs{ +// Project: pulumi.String(project.ProjectId), +// Role: pulumi.String("roles/iam.serviceAccountTokenCreator"), +// Member: pulumi.Sprintf("serviceAccount:service-%v@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com", project.Number), +// }) +// if err != nil { +// return err +// } +// myDataset, err := bigquery.NewDataset(ctx, "my_dataset", &bigquery.DatasetArgs{ +// DatasetId: pulumi.String("example_dataset"), +// FriendlyName: pulumi.String("foo"), +// Description: pulumi.String("bar"), +// Location: pulumi.String("asia-northeast1"), +// }, pulumi.DependsOn([]pulumi.Resource{ +// permissions, +// })) +// if err != nil { +// return err +// } +// keyRing, err := kms.NewKeyRing(ctx, "key_ring", &kms.KeyRingArgs{ +// Name: pulumi.String("example-keyring"), +// Location: pulumi.String("us"), +// }) +// if err != nil { +// return err +// } +// cryptoKey, err := kms.NewCryptoKey(ctx, "crypto_key", &kms.CryptoKeyArgs{ +// Name: pulumi.String("example-key"), +// KeyRing: keyRing.ID(), +// }) +// if err != nil { +// return err +// } +// _, err = bigquery.NewDataTransferConfig(ctx, "query_config_cmek", &bigquery.DataTransferConfigArgs{ +// DisplayName: pulumi.String(""), +// Location: pulumi.String("asia-northeast1"), +// DataSourceId: pulumi.String("scheduled_query"), +// Schedule: pulumi.String("first sunday of quarter 00:00"), +// DestinationDatasetId: myDataset.DatasetId, +// Params: pulumi.StringMap{ +// "destination_table_name_template": pulumi.String("my_table"), +// "write_disposition": pulumi.String("WRITE_APPEND"), +// "query": pulumi.String("SELECT name FROM tabl WHERE x = 'y'"), +// }, +// EncryptionConfiguration: &bigquery.DataTransferConfigEncryptionConfigurationArgs{ +// KmsKeyName: cryptoKey.ID(), +// }, +// }, pulumi.DependsOn([]pulumi.Resource{ +// permissions, +// })) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// 
``` // ### Bigquerydatatransfer Config Salesforce // // ```go @@ -122,10 +203,8 @@ import ( // Params: pulumi.StringMap{ // "connector.authentication.oauth.clientId": pulumi.String("client-id"), // "connector.authentication.oauth.clientSecret": pulumi.String("client-secret"), -// "connector.authentication.username": pulumi.String("username"), -// "connector.authentication.password": pulumi.String("password"), -// "connector.authentication.securityToken": pulumi.String("security-token"), -// "assets": pulumi.String("[\"asset-a\",\"asset-b\"]"), +// "connector.authentication.oauth.myDomain": pulumi.String("MyDomainName"), +// "assets": pulumi.String("[\"asset-a\",\"asset-b\"]"), // }, // }) // if err != nil { @@ -169,6 +248,9 @@ type DataTransferConfig struct { // email address of the user who owns this transfer config. // Structure is documented below. EmailPreferences DataTransferConfigEmailPreferencesPtrOutput `pulumi:"emailPreferences"` + // Represents the encryption configuration for a transfer. + // Structure is documented below. + EncryptionConfiguration DataTransferConfigEncryptionConfigurationPtrOutput `pulumi:"encryptionConfiguration"` // The geographic location where the transfer config should reside. // Examples: US, EU, asia-northeast1. The default value is US. Location pulumi.StringPtrOutput `pulumi:"location"` @@ -275,6 +357,9 @@ type dataTransferConfigState struct { // email address of the user who owns this transfer config. // Structure is documented below. EmailPreferences *DataTransferConfigEmailPreferences `pulumi:"emailPreferences"` + // Represents the encryption configuration for a transfer. + // Structure is documented below. + EncryptionConfiguration *DataTransferConfigEncryptionConfiguration `pulumi:"encryptionConfiguration"` // The geographic location where the transfer config should reside. // Examples: US, EU, asia-northeast1. The default value is US. Location *string `pulumi:"location"` @@ -343,6 +428,9 @@ type DataTransferConfigState struct { // email address of the user who owns this transfer config. // Structure is documented below. EmailPreferences DataTransferConfigEmailPreferencesPtrInput + // Represents the encryption configuration for a transfer. + // Structure is documented below. + EncryptionConfiguration DataTransferConfigEncryptionConfigurationPtrInput // The geographic location where the transfer config should reside. // Examples: US, EU, asia-northeast1. The default value is US. Location pulumi.StringPtrInput @@ -415,6 +503,9 @@ type dataTransferConfigArgs struct { // email address of the user who owns this transfer config. // Structure is documented below. EmailPreferences *DataTransferConfigEmailPreferences `pulumi:"emailPreferences"` + // Represents the encryption configuration for a transfer. + // Structure is documented below. + EncryptionConfiguration *DataTransferConfigEncryptionConfiguration `pulumi:"encryptionConfiguration"` // The geographic location where the transfer config should reside. // Examples: US, EU, asia-northeast1. The default value is US. Location *string `pulumi:"location"` @@ -478,6 +569,9 @@ type DataTransferConfigArgs struct { // email address of the user who owns this transfer config. // Structure is documented below. EmailPreferences DataTransferConfigEmailPreferencesPtrInput + // Represents the encryption configuration for a transfer. + // Structure is documented below. + EncryptionConfiguration DataTransferConfigEncryptionConfigurationPtrInput // The geographic location where the transfer config should reside. 
// Examples: US, EU, asia-northeast1. The default value is US. Location pulumi.StringPtrInput @@ -644,6 +738,14 @@ func (o DataTransferConfigOutput) EmailPreferences() DataTransferConfigEmailPref return o.ApplyT(func(v *DataTransferConfig) DataTransferConfigEmailPreferencesPtrOutput { return v.EmailPreferences }).(DataTransferConfigEmailPreferencesPtrOutput) } +// Represents the encryption configuration for a transfer. +// Structure is documented below. +func (o DataTransferConfigOutput) EncryptionConfiguration() DataTransferConfigEncryptionConfigurationPtrOutput { + return o.ApplyT(func(v *DataTransferConfig) DataTransferConfigEncryptionConfigurationPtrOutput { + return v.EncryptionConfiguration + }).(DataTransferConfigEncryptionConfigurationPtrOutput) +} + // The geographic location where the transfer config should reside. // Examples: US, EU, asia-northeast1. The default value is US. func (o DataTransferConfigOutput) Location() pulumi.StringPtrOutput { diff --git a/sdk/go/gcp/bigquery/pulumiTypes.go b/sdk/go/gcp/bigquery/pulumiTypes.go index 9d91a30639..863add1669 100644 --- a/sdk/go/gcp/bigquery/pulumiTypes.go +++ b/sdk/go/gcp/bigquery/pulumiTypes.go @@ -2844,6 +2844,143 @@ func (o DataTransferConfigEmailPreferencesPtrOutput) EnableFailureEmail() pulumi }).(pulumi.BoolPtrOutput) } +type DataTransferConfigEncryptionConfiguration struct { + // The name of the KMS key used for encrypting BigQuery data. + KmsKeyName string `pulumi:"kmsKeyName"` +} + +// DataTransferConfigEncryptionConfigurationInput is an input type that accepts DataTransferConfigEncryptionConfigurationArgs and DataTransferConfigEncryptionConfigurationOutput values. +// You can construct a concrete instance of `DataTransferConfigEncryptionConfigurationInput` via: +// +// DataTransferConfigEncryptionConfigurationArgs{...} +type DataTransferConfigEncryptionConfigurationInput interface { + pulumi.Input + + ToDataTransferConfigEncryptionConfigurationOutput() DataTransferConfigEncryptionConfigurationOutput + ToDataTransferConfigEncryptionConfigurationOutputWithContext(context.Context) DataTransferConfigEncryptionConfigurationOutput +} + +type DataTransferConfigEncryptionConfigurationArgs struct { + // The name of the KMS key used for encrypting BigQuery data. 
+ KmsKeyName pulumi.StringInput `pulumi:"kmsKeyName"` +} + +func (DataTransferConfigEncryptionConfigurationArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DataTransferConfigEncryptionConfiguration)(nil)).Elem() +} + +func (i DataTransferConfigEncryptionConfigurationArgs) ToDataTransferConfigEncryptionConfigurationOutput() DataTransferConfigEncryptionConfigurationOutput { + return i.ToDataTransferConfigEncryptionConfigurationOutputWithContext(context.Background()) +} + +func (i DataTransferConfigEncryptionConfigurationArgs) ToDataTransferConfigEncryptionConfigurationOutputWithContext(ctx context.Context) DataTransferConfigEncryptionConfigurationOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataTransferConfigEncryptionConfigurationOutput) +} + +func (i DataTransferConfigEncryptionConfigurationArgs) ToDataTransferConfigEncryptionConfigurationPtrOutput() DataTransferConfigEncryptionConfigurationPtrOutput { + return i.ToDataTransferConfigEncryptionConfigurationPtrOutputWithContext(context.Background()) +} + +func (i DataTransferConfigEncryptionConfigurationArgs) ToDataTransferConfigEncryptionConfigurationPtrOutputWithContext(ctx context.Context) DataTransferConfigEncryptionConfigurationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataTransferConfigEncryptionConfigurationOutput).ToDataTransferConfigEncryptionConfigurationPtrOutputWithContext(ctx) +} + +// DataTransferConfigEncryptionConfigurationPtrInput is an input type that accepts DataTransferConfigEncryptionConfigurationArgs, DataTransferConfigEncryptionConfigurationPtr and DataTransferConfigEncryptionConfigurationPtrOutput values. +// You can construct a concrete instance of `DataTransferConfigEncryptionConfigurationPtrInput` via: +// +// DataTransferConfigEncryptionConfigurationArgs{...} +// +// or: +// +// nil +type DataTransferConfigEncryptionConfigurationPtrInput interface { + pulumi.Input + + ToDataTransferConfigEncryptionConfigurationPtrOutput() DataTransferConfigEncryptionConfigurationPtrOutput + ToDataTransferConfigEncryptionConfigurationPtrOutputWithContext(context.Context) DataTransferConfigEncryptionConfigurationPtrOutput +} + +type dataTransferConfigEncryptionConfigurationPtrType DataTransferConfigEncryptionConfigurationArgs + +func DataTransferConfigEncryptionConfigurationPtr(v *DataTransferConfigEncryptionConfigurationArgs) DataTransferConfigEncryptionConfigurationPtrInput { + return (*dataTransferConfigEncryptionConfigurationPtrType)(v) +} + +func (*dataTransferConfigEncryptionConfigurationPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**DataTransferConfigEncryptionConfiguration)(nil)).Elem() +} + +func (i *dataTransferConfigEncryptionConfigurationPtrType) ToDataTransferConfigEncryptionConfigurationPtrOutput() DataTransferConfigEncryptionConfigurationPtrOutput { + return i.ToDataTransferConfigEncryptionConfigurationPtrOutputWithContext(context.Background()) +} + +func (i *dataTransferConfigEncryptionConfigurationPtrType) ToDataTransferConfigEncryptionConfigurationPtrOutputWithContext(ctx context.Context) DataTransferConfigEncryptionConfigurationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataTransferConfigEncryptionConfigurationPtrOutput) +} + +type DataTransferConfigEncryptionConfigurationOutput struct{ *pulumi.OutputState } + +func (DataTransferConfigEncryptionConfigurationOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DataTransferConfigEncryptionConfiguration)(nil)).Elem() +} + +func (o DataTransferConfigEncryptionConfigurationOutput) 
ToDataTransferConfigEncryptionConfigurationOutput() DataTransferConfigEncryptionConfigurationOutput { + return o +} + +func (o DataTransferConfigEncryptionConfigurationOutput) ToDataTransferConfigEncryptionConfigurationOutputWithContext(ctx context.Context) DataTransferConfigEncryptionConfigurationOutput { + return o +} + +func (o DataTransferConfigEncryptionConfigurationOutput) ToDataTransferConfigEncryptionConfigurationPtrOutput() DataTransferConfigEncryptionConfigurationPtrOutput { + return o.ToDataTransferConfigEncryptionConfigurationPtrOutputWithContext(context.Background()) +} + +func (o DataTransferConfigEncryptionConfigurationOutput) ToDataTransferConfigEncryptionConfigurationPtrOutputWithContext(ctx context.Context) DataTransferConfigEncryptionConfigurationPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v DataTransferConfigEncryptionConfiguration) *DataTransferConfigEncryptionConfiguration { + return &v + }).(DataTransferConfigEncryptionConfigurationPtrOutput) +} + +// The name of the KMS key used for encrypting BigQuery data. +func (o DataTransferConfigEncryptionConfigurationOutput) KmsKeyName() pulumi.StringOutput { + return o.ApplyT(func(v DataTransferConfigEncryptionConfiguration) string { return v.KmsKeyName }).(pulumi.StringOutput) +} + +type DataTransferConfigEncryptionConfigurationPtrOutput struct{ *pulumi.OutputState } + +func (DataTransferConfigEncryptionConfigurationPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DataTransferConfigEncryptionConfiguration)(nil)).Elem() +} + +func (o DataTransferConfigEncryptionConfigurationPtrOutput) ToDataTransferConfigEncryptionConfigurationPtrOutput() DataTransferConfigEncryptionConfigurationPtrOutput { + return o +} + +func (o DataTransferConfigEncryptionConfigurationPtrOutput) ToDataTransferConfigEncryptionConfigurationPtrOutputWithContext(ctx context.Context) DataTransferConfigEncryptionConfigurationPtrOutput { + return o +} + +func (o DataTransferConfigEncryptionConfigurationPtrOutput) Elem() DataTransferConfigEncryptionConfigurationOutput { + return o.ApplyT(func(v *DataTransferConfigEncryptionConfiguration) DataTransferConfigEncryptionConfiguration { + if v != nil { + return *v + } + var ret DataTransferConfigEncryptionConfiguration + return ret + }).(DataTransferConfigEncryptionConfigurationOutput) +} + +// The name of the KMS key used for encrypting BigQuery data. +func (o DataTransferConfigEncryptionConfigurationPtrOutput) KmsKeyName() pulumi.StringPtrOutput { + return o.ApplyT(func(v *DataTransferConfigEncryptionConfiguration) *string { + if v == nil { + return nil + } + return &v.KmsKeyName + }).(pulumi.StringPtrOutput) +} + type DataTransferConfigScheduleOptions struct { // If true, automatic scheduling of data transfer runs for this // configuration will be disabled. 
The runs can be started on ad-hoc @@ -15982,6 +16119,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*ConnectionSparkSparkHistoryServerConfigPtrInput)(nil)).Elem(), ConnectionSparkSparkHistoryServerConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataTransferConfigEmailPreferencesInput)(nil)).Elem(), DataTransferConfigEmailPreferencesArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataTransferConfigEmailPreferencesPtrInput)(nil)).Elem(), DataTransferConfigEmailPreferencesArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataTransferConfigEncryptionConfigurationInput)(nil)).Elem(), DataTransferConfigEncryptionConfigurationArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataTransferConfigEncryptionConfigurationPtrInput)(nil)).Elem(), DataTransferConfigEncryptionConfigurationArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataTransferConfigScheduleOptionsInput)(nil)).Elem(), DataTransferConfigScheduleOptionsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataTransferConfigScheduleOptionsPtrInput)(nil)).Elem(), DataTransferConfigScheduleOptionsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataTransferConfigSensitiveParamsInput)(nil)).Elem(), DataTransferConfigSensitiveParamsArgs{}) @@ -16152,6 +16291,8 @@ func init() { pulumi.RegisterOutputType(ConnectionSparkSparkHistoryServerConfigPtrOutput{}) pulumi.RegisterOutputType(DataTransferConfigEmailPreferencesOutput{}) pulumi.RegisterOutputType(DataTransferConfigEmailPreferencesPtrOutput{}) + pulumi.RegisterOutputType(DataTransferConfigEncryptionConfigurationOutput{}) + pulumi.RegisterOutputType(DataTransferConfigEncryptionConfigurationPtrOutput{}) pulumi.RegisterOutputType(DataTransferConfigScheduleOptionsOutput{}) pulumi.RegisterOutputType(DataTransferConfigScheduleOptionsPtrOutput{}) pulumi.RegisterOutputType(DataTransferConfigSensitiveParamsOutput{}) diff --git a/sdk/go/gcp/bigqueryanalyticshub/dataExchange.go b/sdk/go/gcp/bigqueryanalyticshub/dataExchange.go index d55189e333..3ec9fa6a1e 100644 --- a/sdk/go/gcp/bigqueryanalyticshub/dataExchange.go +++ b/sdk/go/gcp/bigqueryanalyticshub/dataExchange.go @@ -50,6 +50,37 @@ import ( // } // // ``` +// ### Bigquery Analyticshub Data Exchange Dcr +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := bigqueryanalyticshub.NewDataExchange(ctx, "data_exchange", &bigqueryanalyticshub.DataExchangeArgs{ +// Location: pulumi.String("US"), +// DataExchangeId: pulumi.String("dcr_data_exchange"), +// DisplayName: pulumi.String("dcr_data_exchange"), +// Description: pulumi.String("example dcr data exchange"), +// SharingEnvironmentConfig: &bigqueryanalyticshub.DataExchangeSharingEnvironmentConfigArgs{ +// DcrExchangeConfig: nil, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // // ## Import // @@ -107,6 +138,10 @@ type DataExchange struct { // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project pulumi.StringOutput `pulumi:"project"` + // Configurable data sharing environment option for a data exchange. + // This field is required for data clean room exchanges. + // Structure is documented below. 
+ SharingEnvironmentConfig DataExchangeSharingEnvironmentConfigOutput `pulumi:"sharingEnvironmentConfig"` } // NewDataExchange registers a new resource with the given unique name, arguments, and options. @@ -172,6 +207,10 @@ type dataExchangeState struct { // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `pulumi:"project"` + // Configurable data sharing environment option for a data exchange. + // This field is required for data clean room exchanges. + // Structure is documented below. + SharingEnvironmentConfig *DataExchangeSharingEnvironmentConfig `pulumi:"sharingEnvironmentConfig"` } type DataExchangeState struct { @@ -199,6 +238,10 @@ type DataExchangeState struct { // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project pulumi.StringPtrInput + // Configurable data sharing environment option for a data exchange. + // This field is required for data clean room exchanges. + // Structure is documented below. + SharingEnvironmentConfig DataExchangeSharingEnvironmentConfigPtrInput } func (DataExchangeState) ElementType() reflect.Type { @@ -225,6 +268,10 @@ type dataExchangeArgs struct { // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `pulumi:"project"` + // Configurable data sharing environment option for a data exchange. + // This field is required for data clean room exchanges. + // Structure is documented below. + SharingEnvironmentConfig *DataExchangeSharingEnvironmentConfig `pulumi:"sharingEnvironmentConfig"` } // The set of arguments for constructing a DataExchange resource. @@ -248,6 +295,10 @@ type DataExchangeArgs struct { // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project pulumi.StringPtrInput + // Configurable data sharing environment option for a data exchange. + // This field is required for data clean room exchanges. + // Structure is documented below. + SharingEnvironmentConfig DataExchangeSharingEnvironmentConfigPtrInput } func (DataExchangeArgs) ElementType() reflect.Type { @@ -391,6 +442,13 @@ func (o DataExchangeOutput) Project() pulumi.StringOutput { return o.ApplyT(func(v *DataExchange) pulumi.StringOutput { return v.Project }).(pulumi.StringOutput) } +// Configurable data sharing environment option for a data exchange. +// This field is required for data clean room exchanges. +// Structure is documented below. 
+func (o DataExchangeOutput) SharingEnvironmentConfig() DataExchangeSharingEnvironmentConfigOutput { + return o.ApplyT(func(v *DataExchange) DataExchangeSharingEnvironmentConfigOutput { return v.SharingEnvironmentConfig }).(DataExchangeSharingEnvironmentConfigOutput) +} + type DataExchangeArrayOutput struct{ *pulumi.OutputState } func (DataExchangeArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/gcp/bigqueryanalyticshub/listing.go b/sdk/go/gcp/bigqueryanalyticshub/listing.go index 95eb2c1df1..3f330ddc9f 100644 --- a/sdk/go/gcp/bigqueryanalyticshub/listing.go +++ b/sdk/go/gcp/bigqueryanalyticshub/listing.go @@ -128,6 +128,96 @@ import ( // } // // ``` +// ### Bigquery Analyticshub Listing Dcr +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigqueryanalyticshub" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// listing, err := bigqueryanalyticshub.NewDataExchange(ctx, "listing", &bigqueryanalyticshub.DataExchangeArgs{ +// Location: pulumi.String("US"), +// DataExchangeId: pulumi.String("dcr_data_exchange"), +// DisplayName: pulumi.String("dcr_data_exchange"), +// Description: pulumi.String("example dcr data exchange"), +// SharingEnvironmentConfig: &bigqueryanalyticshub.DataExchangeSharingEnvironmentConfigArgs{ +// DcrExchangeConfig: nil, +// }, +// }) +// if err != nil { +// return err +// } +// listingDataset, err := bigquery.NewDataset(ctx, "listing", &bigquery.DatasetArgs{ +// DatasetId: pulumi.String("dcr_listing"), +// FriendlyName: pulumi.String("dcr_listing"), +// Description: pulumi.String("example dcr data exchange"), +// Location: pulumi.String("US"), +// }) +// if err != nil { +// return err +// } +// listingTable, err := bigquery.NewTable(ctx, "listing", &bigquery.TableArgs{ +// DeletionProtection: pulumi.Bool(false), +// TableId: pulumi.String("dcr_listing"), +// DatasetId: listingDataset.DatasetId, +// Schema: pulumi.String(`[ +// { +// "name": "name", +// "type": "STRING", +// "mode": "NULLABLE" +// }, +// { +// "name": "post_abbr", +// "type": "STRING", +// "mode": "NULLABLE" +// }, +// { +// "name": "date", +// "type": "DATE", +// "mode": "NULLABLE" +// } +// +// ] +// `), +// +// }) +// if err != nil { +// return err +// } +// _, err = bigqueryanalyticshub.NewListing(ctx, "listing", &bigqueryanalyticshub.ListingArgs{ +// Location: pulumi.String("US"), +// DataExchangeId: listing.DataExchangeId, +// ListingId: pulumi.String("dcr_listing"), +// DisplayName: pulumi.String("dcr_listing"), +// Description: pulumi.String("example dcr data exchange"), +// BigqueryDataset: &bigqueryanalyticshub.ListingBigqueryDatasetArgs{ +// Dataset: listingDataset.ID(), +// SelectedResources: bigqueryanalyticshub.ListingBigqueryDatasetSelectedResourceArray{ +// &bigqueryanalyticshub.ListingBigqueryDatasetSelectedResourceArgs{ +// Table: listingTable.ID(), +// }, +// }, +// }, +// RestrictedExportConfig: &bigqueryanalyticshub.ListingRestrictedExportConfigArgs{ +// Enabled: pulumi.Bool(true), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // // ## Import // diff --git a/sdk/go/gcp/bigqueryanalyticshub/pulumiTypes.go b/sdk/go/gcp/bigqueryanalyticshub/pulumiTypes.go index 435c6ed054..58d45f3ae7 100644 --- a/sdk/go/gcp/bigqueryanalyticshub/pulumiTypes.go +++ b/sdk/go/gcp/bigqueryanalyticshub/pulumiTypes.go @@ -339,11 +339,408 @@ func (o 
DataExchangeIamMemberConditionPtrOutput) Title() pulumi.StringPtrOutput }).(pulumi.StringPtrOutput) } +type DataExchangeSharingEnvironmentConfig struct { + // Data Clean Room (DCR), used for privacy-safe and secured data sharing. + DcrExchangeConfig *DataExchangeSharingEnvironmentConfigDcrExchangeConfig `pulumi:"dcrExchangeConfig"` + // Default Analytics Hub data exchange, used for secured data sharing. + DefaultExchangeConfig *DataExchangeSharingEnvironmentConfigDefaultExchangeConfig `pulumi:"defaultExchangeConfig"` +} + +// DataExchangeSharingEnvironmentConfigInput is an input type that accepts DataExchangeSharingEnvironmentConfigArgs and DataExchangeSharingEnvironmentConfigOutput values. +// You can construct a concrete instance of `DataExchangeSharingEnvironmentConfigInput` via: +// +// DataExchangeSharingEnvironmentConfigArgs{...} +type DataExchangeSharingEnvironmentConfigInput interface { + pulumi.Input + + ToDataExchangeSharingEnvironmentConfigOutput() DataExchangeSharingEnvironmentConfigOutput + ToDataExchangeSharingEnvironmentConfigOutputWithContext(context.Context) DataExchangeSharingEnvironmentConfigOutput +} + +type DataExchangeSharingEnvironmentConfigArgs struct { + // Data Clean Room (DCR), used for privacy-safe and secured data sharing. + DcrExchangeConfig DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrInput `pulumi:"dcrExchangeConfig"` + // Default Analytics Hub data exchange, used for secured data sharing. + DefaultExchangeConfig DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrInput `pulumi:"defaultExchangeConfig"` +} + +func (DataExchangeSharingEnvironmentConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DataExchangeSharingEnvironmentConfig)(nil)).Elem() +} + +func (i DataExchangeSharingEnvironmentConfigArgs) ToDataExchangeSharingEnvironmentConfigOutput() DataExchangeSharingEnvironmentConfigOutput { + return i.ToDataExchangeSharingEnvironmentConfigOutputWithContext(context.Background()) +} + +func (i DataExchangeSharingEnvironmentConfigArgs) ToDataExchangeSharingEnvironmentConfigOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataExchangeSharingEnvironmentConfigOutput) +} + +func (i DataExchangeSharingEnvironmentConfigArgs) ToDataExchangeSharingEnvironmentConfigPtrOutput() DataExchangeSharingEnvironmentConfigPtrOutput { + return i.ToDataExchangeSharingEnvironmentConfigPtrOutputWithContext(context.Background()) +} + +func (i DataExchangeSharingEnvironmentConfigArgs) ToDataExchangeSharingEnvironmentConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataExchangeSharingEnvironmentConfigOutput).ToDataExchangeSharingEnvironmentConfigPtrOutputWithContext(ctx) +} + +// DataExchangeSharingEnvironmentConfigPtrInput is an input type that accepts DataExchangeSharingEnvironmentConfigArgs, DataExchangeSharingEnvironmentConfigPtr and DataExchangeSharingEnvironmentConfigPtrOutput values. 
+// You can construct a concrete instance of `DataExchangeSharingEnvironmentConfigPtrInput` via: +// +// DataExchangeSharingEnvironmentConfigArgs{...} +// +// or: +// +// nil +type DataExchangeSharingEnvironmentConfigPtrInput interface { + pulumi.Input + + ToDataExchangeSharingEnvironmentConfigPtrOutput() DataExchangeSharingEnvironmentConfigPtrOutput + ToDataExchangeSharingEnvironmentConfigPtrOutputWithContext(context.Context) DataExchangeSharingEnvironmentConfigPtrOutput +} + +type dataExchangeSharingEnvironmentConfigPtrType DataExchangeSharingEnvironmentConfigArgs + +func DataExchangeSharingEnvironmentConfigPtr(v *DataExchangeSharingEnvironmentConfigArgs) DataExchangeSharingEnvironmentConfigPtrInput { + return (*dataExchangeSharingEnvironmentConfigPtrType)(v) +} + +func (*dataExchangeSharingEnvironmentConfigPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**DataExchangeSharingEnvironmentConfig)(nil)).Elem() +} + +func (i *dataExchangeSharingEnvironmentConfigPtrType) ToDataExchangeSharingEnvironmentConfigPtrOutput() DataExchangeSharingEnvironmentConfigPtrOutput { + return i.ToDataExchangeSharingEnvironmentConfigPtrOutputWithContext(context.Background()) +} + +func (i *dataExchangeSharingEnvironmentConfigPtrType) ToDataExchangeSharingEnvironmentConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataExchangeSharingEnvironmentConfigPtrOutput) +} + +type DataExchangeSharingEnvironmentConfigOutput struct{ *pulumi.OutputState } + +func (DataExchangeSharingEnvironmentConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DataExchangeSharingEnvironmentConfig)(nil)).Elem() +} + +func (o DataExchangeSharingEnvironmentConfigOutput) ToDataExchangeSharingEnvironmentConfigOutput() DataExchangeSharingEnvironmentConfigOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigOutput) ToDataExchangeSharingEnvironmentConfigOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigOutput) ToDataExchangeSharingEnvironmentConfigPtrOutput() DataExchangeSharingEnvironmentConfigPtrOutput { + return o.ToDataExchangeSharingEnvironmentConfigPtrOutputWithContext(context.Background()) +} + +func (o DataExchangeSharingEnvironmentConfigOutput) ToDataExchangeSharingEnvironmentConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v DataExchangeSharingEnvironmentConfig) *DataExchangeSharingEnvironmentConfig { + return &v + }).(DataExchangeSharingEnvironmentConfigPtrOutput) +} + +// Data Clean Room (DCR), used for privacy-safe and secured data sharing. +func (o DataExchangeSharingEnvironmentConfigOutput) DcrExchangeConfig() DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return o.ApplyT(func(v DataExchangeSharingEnvironmentConfig) *DataExchangeSharingEnvironmentConfigDcrExchangeConfig { + return v.DcrExchangeConfig + }).(DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput) +} + +// Default Analytics Hub data exchange, used for secured data sharing. 
+func (o DataExchangeSharingEnvironmentConfigOutput) DefaultExchangeConfig() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { + return o.ApplyT(func(v DataExchangeSharingEnvironmentConfig) *DataExchangeSharingEnvironmentConfigDefaultExchangeConfig { + return v.DefaultExchangeConfig + }).(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput) +} + +type DataExchangeSharingEnvironmentConfigPtrOutput struct{ *pulumi.OutputState } + +func (DataExchangeSharingEnvironmentConfigPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DataExchangeSharingEnvironmentConfig)(nil)).Elem() +} + +func (o DataExchangeSharingEnvironmentConfigPtrOutput) ToDataExchangeSharingEnvironmentConfigPtrOutput() DataExchangeSharingEnvironmentConfigPtrOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigPtrOutput) ToDataExchangeSharingEnvironmentConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigPtrOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigPtrOutput) Elem() DataExchangeSharingEnvironmentConfigOutput { + return o.ApplyT(func(v *DataExchangeSharingEnvironmentConfig) DataExchangeSharingEnvironmentConfig { + if v != nil { + return *v + } + var ret DataExchangeSharingEnvironmentConfig + return ret + }).(DataExchangeSharingEnvironmentConfigOutput) +} + +// Data Clean Room (DCR), used for privacy-safe and secured data sharing. +func (o DataExchangeSharingEnvironmentConfigPtrOutput) DcrExchangeConfig() DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return o.ApplyT(func(v *DataExchangeSharingEnvironmentConfig) *DataExchangeSharingEnvironmentConfigDcrExchangeConfig { + if v == nil { + return nil + } + return v.DcrExchangeConfig + }).(DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput) +} + +// Default Analytics Hub data exchange, used for secured data sharing. +func (o DataExchangeSharingEnvironmentConfigPtrOutput) DefaultExchangeConfig() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { + return o.ApplyT(func(v *DataExchangeSharingEnvironmentConfig) *DataExchangeSharingEnvironmentConfigDefaultExchangeConfig { + if v == nil { + return nil + } + return v.DefaultExchangeConfig + }).(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput) +} + +type DataExchangeSharingEnvironmentConfigDcrExchangeConfig struct { +} + +// DataExchangeSharingEnvironmentConfigDcrExchangeConfigInput is an input type that accepts DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs and DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput values. 
+// You can construct a concrete instance of `DataExchangeSharingEnvironmentConfigDcrExchangeConfigInput` via: +// +// DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs{...} +type DataExchangeSharingEnvironmentConfigDcrExchangeConfigInput interface { + pulumi.Input + + ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput() DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput + ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigOutputWithContext(context.Context) DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput +} + +type DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs struct { +} + +func (DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DataExchangeSharingEnvironmentConfigDcrExchangeConfig)(nil)).Elem() +} + +func (i DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput() DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput { + return i.ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigOutputWithContext(context.Background()) +} + +func (i DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput) +} + +func (i DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return i.ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutputWithContext(context.Background()) +} + +func (i DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput).ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutputWithContext(ctx) +} + +// DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrInput is an input type that accepts DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs, DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtr and DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput values. 
+// You can construct a concrete instance of `DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrInput` via: +// +// DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs{...} +// +// or: +// +// nil +type DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrInput interface { + pulumi.Input + + ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput + ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutputWithContext(context.Context) DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput +} + +type dataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrType DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs + +func DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtr(v *DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs) DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrInput { + return (*dataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrType)(v) +} + +func (*dataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**DataExchangeSharingEnvironmentConfigDcrExchangeConfig)(nil)).Elem() +} + +func (i *dataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrType) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return i.ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutputWithContext(context.Background()) +} + +func (i *dataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrType) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput) +} + +type DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput struct{ *pulumi.OutputState } + +func (DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DataExchangeSharingEnvironmentConfigDcrExchangeConfig)(nil)).Elem() +} + +func (o DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput() DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return o.ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutputWithContext(context.Background()) +} + +func (o DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v DataExchangeSharingEnvironmentConfigDcrExchangeConfig) *DataExchangeSharingEnvironmentConfigDcrExchangeConfig { + return &v + }).(DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput) +} + +type DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput struct{ *pulumi.OutputState } + +func 
(DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DataExchangeSharingEnvironmentConfigDcrExchangeConfig)(nil)).Elem() +} + +func (o DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput) ToDataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput) Elem() DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput { + return o.ApplyT(func(v *DataExchangeSharingEnvironmentConfigDcrExchangeConfig) DataExchangeSharingEnvironmentConfigDcrExchangeConfig { + if v != nil { + return *v + } + var ret DataExchangeSharingEnvironmentConfigDcrExchangeConfig + return ret + }).(DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput) +} + +type DataExchangeSharingEnvironmentConfigDefaultExchangeConfig struct { +} + +// DataExchangeSharingEnvironmentConfigDefaultExchangeConfigInput is an input type that accepts DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs and DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput values. +// You can construct a concrete instance of `DataExchangeSharingEnvironmentConfigDefaultExchangeConfigInput` via: +// +// DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs{...} +type DataExchangeSharingEnvironmentConfigDefaultExchangeConfigInput interface { + pulumi.Input + + ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput + ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutputWithContext(context.Context) DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput +} + +type DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs struct { +} + +func (DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DataExchangeSharingEnvironmentConfigDefaultExchangeConfig)(nil)).Elem() +} + +func (i DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput { + return i.ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutputWithContext(context.Background()) +} + +func (i DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput) +} + +func (i DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { + return i.ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutputWithContext(context.Background()) +} + +func (i DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { 
+ return pulumi.ToOutputWithContext(ctx, i).(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput).ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutputWithContext(ctx) +} + +// DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrInput is an input type that accepts DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs, DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtr and DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput values. +// You can construct a concrete instance of `DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrInput` via: +// +// DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs{...} +// +// or: +// +// nil +type DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrInput interface { + pulumi.Input + + ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput + ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutputWithContext(context.Context) DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput +} + +type dataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrType DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs + +func DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtr(v *DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs) DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrInput { + return (*dataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrType)(v) +} + +func (*dataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**DataExchangeSharingEnvironmentConfigDefaultExchangeConfig)(nil)).Elem() +} + +func (i *dataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrType) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { + return i.ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutputWithContext(context.Background()) +} + +func (i *dataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrType) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput) +} + +type DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput struct{ *pulumi.OutputState } + +func (DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DataExchangeSharingEnvironmentConfigDefaultExchangeConfig)(nil)).Elem() +} + +func (o DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { + return 
o.ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutputWithContext(context.Background()) +} + +func (o DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v DataExchangeSharingEnvironmentConfigDefaultExchangeConfig) *DataExchangeSharingEnvironmentConfigDefaultExchangeConfig { + return &v + }).(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput) +} + +type DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput struct{ *pulumi.OutputState } + +func (DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DataExchangeSharingEnvironmentConfigDefaultExchangeConfig)(nil)).Elem() +} + +func (o DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput) ToDataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutputWithContext(ctx context.Context) DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput { + return o +} + +func (o DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput) Elem() DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput { + return o.ApplyT(func(v *DataExchangeSharingEnvironmentConfigDefaultExchangeConfig) DataExchangeSharingEnvironmentConfigDefaultExchangeConfig { + if v != nil { + return *v + } + var ret DataExchangeSharingEnvironmentConfigDefaultExchangeConfig + return ret + }).(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput) +} + type ListingBigqueryDataset struct { // Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 - // - // *** Dataset string `pulumi:"dataset"` + // Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + // Structure is documented below. + SelectedResources []ListingBigqueryDatasetSelectedResource `pulumi:"selectedResources"` } // ListingBigqueryDatasetInput is an input type that accepts ListingBigqueryDatasetArgs and ListingBigqueryDatasetOutput values. @@ -359,9 +756,10 @@ type ListingBigqueryDatasetInput interface { type ListingBigqueryDatasetArgs struct { // Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 - // - // *** Dataset pulumi.StringInput `pulumi:"dataset"` + // Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + // Structure is documented below. + SelectedResources ListingBigqueryDatasetSelectedResourceArrayInput `pulumi:"selectedResources"` } func (ListingBigqueryDatasetArgs) ElementType() reflect.Type { @@ -442,12 +840,16 @@ func (o ListingBigqueryDatasetOutput) ToListingBigqueryDatasetPtrOutputWithConte } // Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 -// -// *** func (o ListingBigqueryDatasetOutput) Dataset() pulumi.StringOutput { return o.ApplyT(func(v ListingBigqueryDataset) string { return v.Dataset }).(pulumi.StringOutput) } +// Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. 
+// Structure is documented below. +func (o ListingBigqueryDatasetOutput) SelectedResources() ListingBigqueryDatasetSelectedResourceArrayOutput { + return o.ApplyT(func(v ListingBigqueryDataset) []ListingBigqueryDatasetSelectedResource { return v.SelectedResources }).(ListingBigqueryDatasetSelectedResourceArrayOutput) +} + type ListingBigqueryDatasetPtrOutput struct{ *pulumi.OutputState } func (ListingBigqueryDatasetPtrOutput) ElementType() reflect.Type { @@ -473,8 +875,6 @@ func (o ListingBigqueryDatasetPtrOutput) Elem() ListingBigqueryDatasetOutput { } // Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 -// -// *** func (o ListingBigqueryDatasetPtrOutput) Dataset() pulumi.StringPtrOutput { return o.ApplyT(func(v *ListingBigqueryDataset) *string { if v == nil { @@ -484,6 +884,120 @@ func (o ListingBigqueryDatasetPtrOutput) Dataset() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } +// Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. +// Structure is documented below. +func (o ListingBigqueryDatasetPtrOutput) SelectedResources() ListingBigqueryDatasetSelectedResourceArrayOutput { + return o.ApplyT(func(v *ListingBigqueryDataset) []ListingBigqueryDatasetSelectedResource { + if v == nil { + return nil + } + return v.SelectedResources + }).(ListingBigqueryDatasetSelectedResourceArrayOutput) +} + +type ListingBigqueryDatasetSelectedResource struct { + // Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + // + // *** + Table *string `pulumi:"table"` +} + +// ListingBigqueryDatasetSelectedResourceInput is an input type that accepts ListingBigqueryDatasetSelectedResourceArgs and ListingBigqueryDatasetSelectedResourceOutput values. +// You can construct a concrete instance of `ListingBigqueryDatasetSelectedResourceInput` via: +// +// ListingBigqueryDatasetSelectedResourceArgs{...} +type ListingBigqueryDatasetSelectedResourceInput interface { + pulumi.Input + + ToListingBigqueryDatasetSelectedResourceOutput() ListingBigqueryDatasetSelectedResourceOutput + ToListingBigqueryDatasetSelectedResourceOutputWithContext(context.Context) ListingBigqueryDatasetSelectedResourceOutput +} + +type ListingBigqueryDatasetSelectedResourceArgs struct { + // Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + // + // *** + Table pulumi.StringPtrInput `pulumi:"table"` +} + +func (ListingBigqueryDatasetSelectedResourceArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ListingBigqueryDatasetSelectedResource)(nil)).Elem() +} + +func (i ListingBigqueryDatasetSelectedResourceArgs) ToListingBigqueryDatasetSelectedResourceOutput() ListingBigqueryDatasetSelectedResourceOutput { + return i.ToListingBigqueryDatasetSelectedResourceOutputWithContext(context.Background()) +} + +func (i ListingBigqueryDatasetSelectedResourceArgs) ToListingBigqueryDatasetSelectedResourceOutputWithContext(ctx context.Context) ListingBigqueryDatasetSelectedResourceOutput { + return pulumi.ToOutputWithContext(ctx, i).(ListingBigqueryDatasetSelectedResourceOutput) +} + +// ListingBigqueryDatasetSelectedResourceArrayInput is an input type that accepts ListingBigqueryDatasetSelectedResourceArray and ListingBigqueryDatasetSelectedResourceArrayOutput values. 
+// You can construct a concrete instance of `ListingBigqueryDatasetSelectedResourceArrayInput` via: +// +// ListingBigqueryDatasetSelectedResourceArray{ ListingBigqueryDatasetSelectedResourceArgs{...} } +type ListingBigqueryDatasetSelectedResourceArrayInput interface { + pulumi.Input + + ToListingBigqueryDatasetSelectedResourceArrayOutput() ListingBigqueryDatasetSelectedResourceArrayOutput + ToListingBigqueryDatasetSelectedResourceArrayOutputWithContext(context.Context) ListingBigqueryDatasetSelectedResourceArrayOutput +} + +type ListingBigqueryDatasetSelectedResourceArray []ListingBigqueryDatasetSelectedResourceInput + +func (ListingBigqueryDatasetSelectedResourceArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]ListingBigqueryDatasetSelectedResource)(nil)).Elem() +} + +func (i ListingBigqueryDatasetSelectedResourceArray) ToListingBigqueryDatasetSelectedResourceArrayOutput() ListingBigqueryDatasetSelectedResourceArrayOutput { + return i.ToListingBigqueryDatasetSelectedResourceArrayOutputWithContext(context.Background()) +} + +func (i ListingBigqueryDatasetSelectedResourceArray) ToListingBigqueryDatasetSelectedResourceArrayOutputWithContext(ctx context.Context) ListingBigqueryDatasetSelectedResourceArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(ListingBigqueryDatasetSelectedResourceArrayOutput) +} + +type ListingBigqueryDatasetSelectedResourceOutput struct{ *pulumi.OutputState } + +func (ListingBigqueryDatasetSelectedResourceOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ListingBigqueryDatasetSelectedResource)(nil)).Elem() +} + +func (o ListingBigqueryDatasetSelectedResourceOutput) ToListingBigqueryDatasetSelectedResourceOutput() ListingBigqueryDatasetSelectedResourceOutput { + return o +} + +func (o ListingBigqueryDatasetSelectedResourceOutput) ToListingBigqueryDatasetSelectedResourceOutputWithContext(ctx context.Context) ListingBigqueryDatasetSelectedResourceOutput { + return o +} + +// Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" +// +// *** +func (o ListingBigqueryDatasetSelectedResourceOutput) Table() pulumi.StringPtrOutput { + return o.ApplyT(func(v ListingBigqueryDatasetSelectedResource) *string { return v.Table }).(pulumi.StringPtrOutput) +} + +type ListingBigqueryDatasetSelectedResourceArrayOutput struct{ *pulumi.OutputState } + +func (ListingBigqueryDatasetSelectedResourceArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]ListingBigqueryDatasetSelectedResource)(nil)).Elem() +} + +func (o ListingBigqueryDatasetSelectedResourceArrayOutput) ToListingBigqueryDatasetSelectedResourceArrayOutput() ListingBigqueryDatasetSelectedResourceArrayOutput { + return o +} + +func (o ListingBigqueryDatasetSelectedResourceArrayOutput) ToListingBigqueryDatasetSelectedResourceArrayOutputWithContext(ctx context.Context) ListingBigqueryDatasetSelectedResourceArrayOutput { + return o +} + +func (o ListingBigqueryDatasetSelectedResourceArrayOutput) Index(i pulumi.IntInput) ListingBigqueryDatasetSelectedResourceOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) ListingBigqueryDatasetSelectedResource { + return vs[0].([]ListingBigqueryDatasetSelectedResource)[vs[1].(int)] + }).(ListingBigqueryDatasetSelectedResourceOutput) +} + type ListingDataProvider struct { // Name of the data provider. 
Name string `pulumi:"name"` @@ -1125,6 +1639,9 @@ func (o ListingPublisherPtrOutput) PrimaryContact() pulumi.StringPtrOutput { type ListingRestrictedExportConfig struct { // If true, enable restricted export. Enabled *bool `pulumi:"enabled"` + // (Output) + // If true, restrict direct table access(read api/tabledata.list) on linked table. + RestrictDirectTableAccess *bool `pulumi:"restrictDirectTableAccess"` // If true, restrict export of query result derived from restricted linked dataset table. RestrictQueryResult *bool `pulumi:"restrictQueryResult"` } @@ -1143,6 +1660,9 @@ type ListingRestrictedExportConfigInput interface { type ListingRestrictedExportConfigArgs struct { // If true, enable restricted export. Enabled pulumi.BoolPtrInput `pulumi:"enabled"` + // (Output) + // If true, restrict direct table access(read api/tabledata.list) on linked table. + RestrictDirectTableAccess pulumi.BoolPtrInput `pulumi:"restrictDirectTableAccess"` // If true, restrict export of query result derived from restricted linked dataset table. RestrictQueryResult pulumi.BoolPtrInput `pulumi:"restrictQueryResult"` } @@ -1229,6 +1749,12 @@ func (o ListingRestrictedExportConfigOutput) Enabled() pulumi.BoolPtrOutput { return o.ApplyT(func(v ListingRestrictedExportConfig) *bool { return v.Enabled }).(pulumi.BoolPtrOutput) } +// (Output) +// If true, restrict direct table access(read api/tabledata.list) on linked table. +func (o ListingRestrictedExportConfigOutput) RestrictDirectTableAccess() pulumi.BoolPtrOutput { + return o.ApplyT(func(v ListingRestrictedExportConfig) *bool { return v.RestrictDirectTableAccess }).(pulumi.BoolPtrOutput) +} + // If true, restrict export of query result derived from restricted linked dataset table. func (o ListingRestrictedExportConfigOutput) RestrictQueryResult() pulumi.BoolPtrOutput { return o.ApplyT(func(v ListingRestrictedExportConfig) *bool { return v.RestrictQueryResult }).(pulumi.BoolPtrOutput) @@ -1268,6 +1794,17 @@ func (o ListingRestrictedExportConfigPtrOutput) Enabled() pulumi.BoolPtrOutput { }).(pulumi.BoolPtrOutput) } +// (Output) +// If true, restrict direct table access(read api/tabledata.list) on linked table. +func (o ListingRestrictedExportConfigPtrOutput) RestrictDirectTableAccess() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *ListingRestrictedExportConfig) *bool { + if v == nil { + return nil + } + return v.RestrictDirectTableAccess + }).(pulumi.BoolPtrOutput) +} + // If true, restrict export of query result derived from restricted linked dataset table. 
func (o ListingRestrictedExportConfigPtrOutput) RestrictQueryResult() pulumi.BoolPtrOutput { return o.ApplyT(func(v *ListingRestrictedExportConfig) *bool { @@ -1283,8 +1820,16 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*DataExchangeIamBindingConditionPtrInput)(nil)).Elem(), DataExchangeIamBindingConditionArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataExchangeIamMemberConditionInput)(nil)).Elem(), DataExchangeIamMemberConditionArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataExchangeIamMemberConditionPtrInput)(nil)).Elem(), DataExchangeIamMemberConditionArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataExchangeSharingEnvironmentConfigInput)(nil)).Elem(), DataExchangeSharingEnvironmentConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataExchangeSharingEnvironmentConfigPtrInput)(nil)).Elem(), DataExchangeSharingEnvironmentConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataExchangeSharingEnvironmentConfigDcrExchangeConfigInput)(nil)).Elem(), DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrInput)(nil)).Elem(), DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataExchangeSharingEnvironmentConfigDefaultExchangeConfigInput)(nil)).Elem(), DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrInput)(nil)).Elem(), DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ListingBigqueryDatasetInput)(nil)).Elem(), ListingBigqueryDatasetArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ListingBigqueryDatasetPtrInput)(nil)).Elem(), ListingBigqueryDatasetArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ListingBigqueryDatasetSelectedResourceInput)(nil)).Elem(), ListingBigqueryDatasetSelectedResourceArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ListingBigqueryDatasetSelectedResourceArrayInput)(nil)).Elem(), ListingBigqueryDatasetSelectedResourceArray{}) pulumi.RegisterInputType(reflect.TypeOf((*ListingDataProviderInput)(nil)).Elem(), ListingDataProviderArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ListingDataProviderPtrInput)(nil)).Elem(), ListingDataProviderArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ListingIamBindingConditionInput)(nil)).Elem(), ListingIamBindingConditionArgs{}) @@ -1299,8 +1844,16 @@ func init() { pulumi.RegisterOutputType(DataExchangeIamBindingConditionPtrOutput{}) pulumi.RegisterOutputType(DataExchangeIamMemberConditionOutput{}) pulumi.RegisterOutputType(DataExchangeIamMemberConditionPtrOutput{}) + pulumi.RegisterOutputType(DataExchangeSharingEnvironmentConfigOutput{}) + pulumi.RegisterOutputType(DataExchangeSharingEnvironmentConfigPtrOutput{}) + pulumi.RegisterOutputType(DataExchangeSharingEnvironmentConfigDcrExchangeConfigOutput{}) + pulumi.RegisterOutputType(DataExchangeSharingEnvironmentConfigDcrExchangeConfigPtrOutput{}) + pulumi.RegisterOutputType(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigOutput{}) + pulumi.RegisterOutputType(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigPtrOutput{}) pulumi.RegisterOutputType(ListingBigqueryDatasetOutput{}) pulumi.RegisterOutputType(ListingBigqueryDatasetPtrOutput{}) + pulumi.RegisterOutputType(ListingBigqueryDatasetSelectedResourceOutput{}) + pulumi.RegisterOutputType(ListingBigqueryDatasetSelectedResourceArrayOutput{}) 
pulumi.RegisterOutputType(ListingDataProviderOutput{}) pulumi.RegisterOutputType(ListingDataProviderPtrOutput{}) pulumi.RegisterOutputType(ListingIamBindingConditionOutput{}) diff --git a/sdk/go/gcp/bigtable/pulumiTypes.go b/sdk/go/gcp/bigtable/pulumiTypes.go index dc34e49aca..79d4ead7ec 100644 --- a/sdk/go/gcp/bigtable/pulumiTypes.go +++ b/sdk/go/gcp/bigtable/pulumiTypes.go @@ -1450,6 +1450,8 @@ func (o TableAutomatedBackupPolicyPtrOutput) RetentionPeriod() pulumi.StringPtrO type TableColumnFamily struct { // The name of the column family. Family string `pulumi:"family"` + // The type of the column family. + Type *string `pulumi:"type"` } // TableColumnFamilyInput is an input type that accepts TableColumnFamilyArgs and TableColumnFamilyOutput values. @@ -1466,6 +1468,8 @@ type TableColumnFamilyInput interface { type TableColumnFamilyArgs struct { // The name of the column family. Family pulumi.StringInput `pulumi:"family"` + // The type of the column family. + Type pulumi.StringPtrInput `pulumi:"type"` } func (TableColumnFamilyArgs) ElementType() reflect.Type { @@ -1524,6 +1528,11 @@ func (o TableColumnFamilyOutput) Family() pulumi.StringOutput { return o.ApplyT(func(v TableColumnFamily) string { return v.Family }).(pulumi.StringOutput) } +// The type of the column family. +func (o TableColumnFamilyOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v TableColumnFamily) *string { return v.Type }).(pulumi.StringPtrOutput) +} + type TableColumnFamilyArrayOutput struct{ *pulumi.OutputState } func (TableColumnFamilyArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/gcp/bigtable/table.go b/sdk/go/gcp/bigtable/table.go index ce8b992bc3..f17b8e6f6e 100644 --- a/sdk/go/gcp/bigtable/table.go +++ b/sdk/go/gcp/bigtable/table.go @@ -58,6 +58,25 @@ import ( // }, // &bigtable.TableColumnFamilyArgs{ // Family: pulumi.String("family-second"), +// Type: pulumi.String("intsum"), +// }, +// &bigtable.TableColumnFamilyArgs{ +// Family: pulumi.String("family-third"), +// Type: pulumi.String(` { +// "aggregateType": { +// "max": {}, +// "inputType": { +// "int64Type": { +// "encoding": { +// "bigEndianBytes": {} +// } +// } +// } +// } +// } +// +// `), +// // }, // }, // ChangeStreamRetention: pulumi.String("24h0m0s"), diff --git a/sdk/go/gcp/certificateauthority/authority.go b/sdk/go/gcp/certificateauthority/authority.go index 445e0de6ca..377cd0233e 100644 --- a/sdk/go/gcp/certificateauthority/authority.go +++ b/sdk/go/gcp/certificateauthority/authority.go @@ -433,7 +433,8 @@ type Authority struct { // fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". CreateTime pulumi.StringOutput `pulumi:"createTime"` DeletionProtection pulumi.BoolPtrOutput `pulumi:"deletionProtection"` - // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + // ENABLED, DISABLED, STAGED. DesiredState pulumi.StringPtrOutput `pulumi:"desiredState"` // All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. EffectiveLabels pulumi.StringMapOutput `pulumi:"effectiveLabels"` @@ -558,7 +559,8 @@ type authorityState struct { // fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
CreateTime *string `pulumi:"createTime"` DeletionProtection *bool `pulumi:"deletionProtection"` - // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + // ENABLED, DISABLED, STAGED. DesiredState *string `pulumi:"desiredState"` // All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. EffectiveLabels map[string]string `pulumi:"effectiveLabels"` @@ -634,7 +636,8 @@ type AuthorityState struct { // fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". CreateTime pulumi.StringPtrInput DeletionProtection pulumi.BoolPtrInput - // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + // ENABLED, DISABLED, STAGED. DesiredState pulumi.StringPtrInput // All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. EffectiveLabels pulumi.StringMapInput @@ -707,7 +710,8 @@ type authorityArgs struct { // Structure is documented below. Config AuthorityConfig `pulumi:"config"` DeletionProtection *bool `pulumi:"deletionProtection"` - // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + // ENABLED, DISABLED, STAGED. DesiredState *string `pulumi:"desiredState"` // The name of a Cloud Storage bucket where this CertificateAuthority will publish content, such as the CA certificate and // CRLs. This must be a bucket name, without any prefixes (such as 'gs://') or suffixes (such as '.googleapis.com'). For @@ -758,7 +762,8 @@ type AuthorityArgs struct { // Structure is documented below. Config AuthorityConfigInput DeletionProtection pulumi.BoolPtrInput - // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + // Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + // ENABLED, DISABLED, STAGED. DesiredState pulumi.StringPtrInput // The name of a Cloud Storage bucket where this CertificateAuthority will publish content, such as the CA certificate and // CRLs. This must be a bucket name, without any prefixes (such as 'gs://') or suffixes (such as '.googleapis.com'). For @@ -916,7 +921,8 @@ func (o AuthorityOutput) DeletionProtection() pulumi.BoolPtrOutput { return o.ApplyT(func(v *Authority) pulumi.BoolPtrOutput { return v.DeletionProtection }).(pulumi.BoolPtrOutput) } -// Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. +// Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: +// ENABLED, DISABLED, STAGED. 
func (o AuthorityOutput) DesiredState() pulumi.StringPtrOutput { return o.ApplyT(func(v *Authority) pulumi.StringPtrOutput { return v.DesiredState }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/gcp/certificatemanager/certificate.go b/sdk/go/gcp/certificatemanager/certificate.go index 89360255a9..e26c9793d6 100644 --- a/sdk/go/gcp/certificatemanager/certificate.go +++ b/sdk/go/gcp/certificatemanager/certificate.go @@ -521,6 +521,8 @@ type Certificate struct { // The combination of labels configured directly on the resource // and default labels configured on the provider. PulumiLabels pulumi.StringMapOutput `pulumi:"pulumiLabels"` + // The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + SanDnsnames pulumi.StringArrayOutput `pulumi:"sanDnsnames"` // The scope of the certificate. // DEFAULT: Certificates with default scope are served from core Google data centers. // If unsure, choose this option. @@ -598,6 +600,8 @@ type certificateState struct { // The combination of labels configured directly on the resource // and default labels configured on the provider. PulumiLabels map[string]string `pulumi:"pulumiLabels"` + // The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + SanDnsnames []string `pulumi:"sanDnsnames"` // The scope of the certificate. // DEFAULT: Certificates with default scope are served from core Google data centers. // If unsure, choose this option. @@ -641,6 +645,8 @@ type CertificateState struct { // The combination of labels configured directly on the resource // and default labels configured on the provider. PulumiLabels pulumi.StringMapInput + // The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + SanDnsnames pulumi.StringArrayInput // The scope of the certificate. // DEFAULT: Certificates with default scope are served from core Google data centers. // If unsure, choose this option. @@ -875,6 +881,11 @@ func (o CertificateOutput) PulumiLabels() pulumi.StringMapOutput { return o.ApplyT(func(v *Certificate) pulumi.StringMapOutput { return v.PulumiLabels }).(pulumi.StringMapOutput) } +// The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) +func (o CertificateOutput) SanDnsnames() pulumi.StringArrayOutput { + return o.ApplyT(func(v *Certificate) pulumi.StringArrayOutput { return v.SanDnsnames }).(pulumi.StringArrayOutput) +} + // The scope of the certificate. // DEFAULT: Certificates with default scope are served from core Google data centers. // If unsure, choose this option. diff --git a/sdk/go/gcp/certificatemanager/getCertificates.go b/sdk/go/gcp/certificatemanager/getCertificates.go new file mode 100644 index 0000000000..f1dd6d9805 --- /dev/null +++ b/sdk/go/gcp/certificatemanager/getCertificates.go @@ -0,0 +1,151 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package certificatemanager + +import ( + "context" + "reflect" + + "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// List all certificates within Google Certificate Manager for a given project, region or filter. 
+// +// ## Example Usage +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/certificatemanager" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := certificatemanager.GetCertificates(ctx, nil, nil) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ### With A Filter +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/certificatemanager" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := certificatemanager.GetCertificates(ctx, &certificatemanager.GetCertificatesArgs{ +// Filter: pulumi.StringRef("name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*"), +// }, nil) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +func GetCertificates(ctx *pulumi.Context, args *GetCertificatesArgs, opts ...pulumi.InvokeOption) (*GetCertificatesResult, error) { + opts = internal.PkgInvokeDefaultOpts(opts) + var rv GetCertificatesResult + err := ctx.Invoke("gcp:certificatemanager/getCertificates:getCertificates", args, &rv, opts...) + if err != nil { + return nil, err + } + return &rv, nil +} + +// A collection of arguments for invoking getCertificates. +type GetCertificatesArgs struct { + // Filter expression to restrict the certificates returned. + Filter *string `pulumi:"filter"` + // The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + Region *string `pulumi:"region"` +} + +// A collection of values returned by getCertificates. +type GetCertificatesResult struct { + Certificates []GetCertificatesCertificate `pulumi:"certificates"` + Filter *string `pulumi:"filter"` + // The provider-assigned unique ID for this managed resource. + Id string `pulumi:"id"` + Region *string `pulumi:"region"` +} + +func GetCertificatesOutput(ctx *pulumi.Context, args GetCertificatesOutputArgs, opts ...pulumi.InvokeOption) GetCertificatesResultOutput { + return pulumi.ToOutputWithContext(context.Background(), args). + ApplyT(func(v interface{}) (GetCertificatesResult, error) { + args := v.(GetCertificatesArgs) + r, err := GetCertificates(ctx, &args, opts...) + var s GetCertificatesResult + if r != nil { + s = *r + } + return s, err + }).(GetCertificatesResultOutput) +} + +// A collection of arguments for invoking getCertificates. +type GetCertificatesOutputArgs struct { + // Filter expression to restrict the certificates returned. + Filter pulumi.StringPtrInput `pulumi:"filter"` + // The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + Region pulumi.StringPtrInput `pulumi:"region"` +} + +func (GetCertificatesOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesArgs)(nil)).Elem() +} + +// A collection of values returned by getCertificates. 
+type GetCertificatesResultOutput struct{ *pulumi.OutputState } + +func (GetCertificatesResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesResult)(nil)).Elem() +} + +func (o GetCertificatesResultOutput) ToGetCertificatesResultOutput() GetCertificatesResultOutput { + return o +} + +func (o GetCertificatesResultOutput) ToGetCertificatesResultOutputWithContext(ctx context.Context) GetCertificatesResultOutput { + return o +} + +func (o GetCertificatesResultOutput) Certificates() GetCertificatesCertificateArrayOutput { + return o.ApplyT(func(v GetCertificatesResult) []GetCertificatesCertificate { return v.Certificates }).(GetCertificatesCertificateArrayOutput) +} + +func (o GetCertificatesResultOutput) Filter() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCertificatesResult) *string { return v.Filter }).(pulumi.StringPtrOutput) +} + +// The provider-assigned unique ID for this managed resource. +func (o GetCertificatesResultOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesResult) string { return v.Id }).(pulumi.StringOutput) +} + +func (o GetCertificatesResultOutput) Region() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCertificatesResult) *string { return v.Region }).(pulumi.StringPtrOutput) +} + +func init() { + pulumi.RegisterOutputType(GetCertificatesResultOutput{}) +} diff --git a/sdk/go/gcp/certificatemanager/pulumiTypes.go b/sdk/go/gcp/certificatemanager/pulumiTypes.go index 903e250f9b..811f2f5fed 100644 --- a/sdk/go/gcp/certificatemanager/pulumiTypes.go +++ b/sdk/go/gcp/certificatemanager/pulumiTypes.go @@ -2124,6 +2124,636 @@ func (o GetCertificateMapGclbTargetIpConfigArrayOutput) Index(i pulumi.IntInput) }).(GetCertificateMapGclbTargetIpConfigOutput) } +type GetCertificatesCertificate struct { + // A human-readable description of the resource. + Description string `pulumi:"description"` + EffectiveLabels map[string]string `pulumi:"effectiveLabels"` + // Set of label tags associated with the Certificate resource. + // + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field 'effective_labels' for all of the labels present on the resource. + Labels map[string]string `pulumi:"labels"` + // The Certificate Manager location. If not specified, "global" is used. + Location string `pulumi:"location"` + // Configuration and state of a Managed Certificate. + // Certificate Manager provisions and renews Managed Certificates + // automatically, for as long as it's authorized to do so. + Manageds []GetCertificatesCertificateManaged `pulumi:"manageds"` + // A user-defined name of the certificate. Certificate names must be unique + // The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, + // and all following characters must be a dash, underscore, letter or digit. + Name string `pulumi:"name"` + // The ID of the project in which the resource belongs. If it + // is not provided, the provider project is used. + Project string `pulumi:"project"` + // The combination of labels configured directly on the resource + // and default labels configured on the provider. + PulumiLabels map[string]string `pulumi:"pulumiLabels"` + // The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + SanDnsnames []string `pulumi:"sanDnsnames"` + // The scope of the certificate. 
+ // + // DEFAULT: Certificates with default scope are served from core Google data centers. + // If unsure, choose this option. + // + // EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. + // See https://cloud.google.com/vpc/docs/edge-locations. + // + // ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). + // See https://cloud.google.com/compute/docs/regions-zones + Scope string `pulumi:"scope"` +} + +// GetCertificatesCertificateInput is an input type that accepts GetCertificatesCertificateArgs and GetCertificatesCertificateOutput values. +// You can construct a concrete instance of `GetCertificatesCertificateInput` via: +// +// GetCertificatesCertificateArgs{...} +type GetCertificatesCertificateInput interface { + pulumi.Input + + ToGetCertificatesCertificateOutput() GetCertificatesCertificateOutput + ToGetCertificatesCertificateOutputWithContext(context.Context) GetCertificatesCertificateOutput +} + +type GetCertificatesCertificateArgs struct { + // A human-readable description of the resource. + Description pulumi.StringInput `pulumi:"description"` + EffectiveLabels pulumi.StringMapInput `pulumi:"effectiveLabels"` + // Set of label tags associated with the Certificate resource. + // + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field 'effective_labels' for all of the labels present on the resource. + Labels pulumi.StringMapInput `pulumi:"labels"` + // The Certificate Manager location. If not specified, "global" is used. + Location pulumi.StringInput `pulumi:"location"` + // Configuration and state of a Managed Certificate. + // Certificate Manager provisions and renews Managed Certificates + // automatically, for as long as it's authorized to do so. + Manageds GetCertificatesCertificateManagedArrayInput `pulumi:"manageds"` + // A user-defined name of the certificate. Certificate names must be unique + // The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, + // and all following characters must be a dash, underscore, letter or digit. + Name pulumi.StringInput `pulumi:"name"` + // The ID of the project in which the resource belongs. If it + // is not provided, the provider project is used. + Project pulumi.StringInput `pulumi:"project"` + // The combination of labels configured directly on the resource + // and default labels configured on the provider. + PulumiLabels pulumi.StringMapInput `pulumi:"pulumiLabels"` + // The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + SanDnsnames pulumi.StringArrayInput `pulumi:"sanDnsnames"` + // The scope of the certificate. + // + // DEFAULT: Certificates with default scope are served from core Google data centers. + // If unsure, choose this option. + // + // EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. + // See https://cloud.google.com/vpc/docs/edge-locations. + // + // ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). 
+ // See https://cloud.google.com/compute/docs/regions-zones + Scope pulumi.StringInput `pulumi:"scope"` +} + +func (GetCertificatesCertificateArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesCertificate)(nil)).Elem() +} + +func (i GetCertificatesCertificateArgs) ToGetCertificatesCertificateOutput() GetCertificatesCertificateOutput { + return i.ToGetCertificatesCertificateOutputWithContext(context.Background()) +} + +func (i GetCertificatesCertificateArgs) ToGetCertificatesCertificateOutputWithContext(ctx context.Context) GetCertificatesCertificateOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCertificatesCertificateOutput) +} + +// GetCertificatesCertificateArrayInput is an input type that accepts GetCertificatesCertificateArray and GetCertificatesCertificateArrayOutput values. +// You can construct a concrete instance of `GetCertificatesCertificateArrayInput` via: +// +// GetCertificatesCertificateArray{ GetCertificatesCertificateArgs{...} } +type GetCertificatesCertificateArrayInput interface { + pulumi.Input + + ToGetCertificatesCertificateArrayOutput() GetCertificatesCertificateArrayOutput + ToGetCertificatesCertificateArrayOutputWithContext(context.Context) GetCertificatesCertificateArrayOutput +} + +type GetCertificatesCertificateArray []GetCertificatesCertificateInput + +func (GetCertificatesCertificateArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCertificatesCertificate)(nil)).Elem() +} + +func (i GetCertificatesCertificateArray) ToGetCertificatesCertificateArrayOutput() GetCertificatesCertificateArrayOutput { + return i.ToGetCertificatesCertificateArrayOutputWithContext(context.Background()) +} + +func (i GetCertificatesCertificateArray) ToGetCertificatesCertificateArrayOutputWithContext(ctx context.Context) GetCertificatesCertificateArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCertificatesCertificateArrayOutput) +} + +type GetCertificatesCertificateOutput struct{ *pulumi.OutputState } + +func (GetCertificatesCertificateOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesCertificate)(nil)).Elem() +} + +func (o GetCertificatesCertificateOutput) ToGetCertificatesCertificateOutput() GetCertificatesCertificateOutput { + return o +} + +func (o GetCertificatesCertificateOutput) ToGetCertificatesCertificateOutputWithContext(ctx context.Context) GetCertificatesCertificateOutput { + return o +} + +// A human-readable description of the resource. +func (o GetCertificatesCertificateOutput) Description() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificate) string { return v.Description }).(pulumi.StringOutput) +} + +func (o GetCertificatesCertificateOutput) EffectiveLabels() pulumi.StringMapOutput { + return o.ApplyT(func(v GetCertificatesCertificate) map[string]string { return v.EffectiveLabels }).(pulumi.StringMapOutput) +} + +// Set of label tags associated with the Certificate resource. +// +// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. +// Please refer to the field 'effective_labels' for all of the labels present on the resource. +func (o GetCertificatesCertificateOutput) Labels() pulumi.StringMapOutput { + return o.ApplyT(func(v GetCertificatesCertificate) map[string]string { return v.Labels }).(pulumi.StringMapOutput) +} + +// The Certificate Manager location. If not specified, "global" is used. 
+func (o GetCertificatesCertificateOutput) Location() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificate) string { return v.Location }).(pulumi.StringOutput) +} + +// Configuration and state of a Managed Certificate. +// Certificate Manager provisions and renews Managed Certificates +// automatically, for as long as it's authorized to do so. +func (o GetCertificatesCertificateOutput) Manageds() GetCertificatesCertificateManagedArrayOutput { + return o.ApplyT(func(v GetCertificatesCertificate) []GetCertificatesCertificateManaged { return v.Manageds }).(GetCertificatesCertificateManagedArrayOutput) +} + +// A user-defined name of the certificate. Certificate names must be unique +// The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, +// and all following characters must be a dash, underscore, letter or digit. +func (o GetCertificatesCertificateOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificate) string { return v.Name }).(pulumi.StringOutput) +} + +// The ID of the project in which the resource belongs. If it +// is not provided, the provider project is used. +func (o GetCertificatesCertificateOutput) Project() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificate) string { return v.Project }).(pulumi.StringOutput) +} + +// The combination of labels configured directly on the resource +// +// and default labels configured on the provider. +func (o GetCertificatesCertificateOutput) PulumiLabels() pulumi.StringMapOutput { + return o.ApplyT(func(v GetCertificatesCertificate) map[string]string { return v.PulumiLabels }).(pulumi.StringMapOutput) +} + +// The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) +func (o GetCertificatesCertificateOutput) SanDnsnames() pulumi.StringArrayOutput { + return o.ApplyT(func(v GetCertificatesCertificate) []string { return v.SanDnsnames }).(pulumi.StringArrayOutput) +} + +// The scope of the certificate. +// +// DEFAULT: Certificates with default scope are served from core Google data centers. +// If unsure, choose this option. +// +// EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. +// See https://cloud.google.com/vpc/docs/edge-locations. +// +// ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). 
+// See https://cloud.google.com/compute/docs/regions-zones +func (o GetCertificatesCertificateOutput) Scope() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificate) string { return v.Scope }).(pulumi.StringOutput) +} + +type GetCertificatesCertificateArrayOutput struct{ *pulumi.OutputState } + +func (GetCertificatesCertificateArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCertificatesCertificate)(nil)).Elem() +} + +func (o GetCertificatesCertificateArrayOutput) ToGetCertificatesCertificateArrayOutput() GetCertificatesCertificateArrayOutput { + return o +} + +func (o GetCertificatesCertificateArrayOutput) ToGetCertificatesCertificateArrayOutputWithContext(ctx context.Context) GetCertificatesCertificateArrayOutput { + return o +} + +func (o GetCertificatesCertificateArrayOutput) Index(i pulumi.IntInput) GetCertificatesCertificateOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetCertificatesCertificate { + return vs[0].([]GetCertificatesCertificate)[vs[1].(int)] + }).(GetCertificatesCertificateOutput) +} + +type GetCertificatesCertificateManaged struct { + // Detailed state of the latest authorization attempt for each domain + // specified for this Managed Certificate. + AuthorizationAttemptInfos []GetCertificatesCertificateManagedAuthorizationAttemptInfo `pulumi:"authorizationAttemptInfos"` + // Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + DnsAuthorizations []string `pulumi:"dnsAuthorizations"` + // The domains for which a managed SSL certificate will be generated. + // Wildcard domains are only supported with DNS challenge resolution + Domains []string `pulumi:"domains"` + // The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. + // If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. + // Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + IssuanceConfig string `pulumi:"issuanceConfig"` + // Information about issues with provisioning this Managed Certificate. + ProvisioningIssues []GetCertificatesCertificateManagedProvisioningIssue `pulumi:"provisioningIssues"` + // A state of this Managed Certificate. + State string `pulumi:"state"` +} + +// GetCertificatesCertificateManagedInput is an input type that accepts GetCertificatesCertificateManagedArgs and GetCertificatesCertificateManagedOutput values. +// You can construct a concrete instance of `GetCertificatesCertificateManagedInput` via: +// +// GetCertificatesCertificateManagedArgs{...} +type GetCertificatesCertificateManagedInput interface { + pulumi.Input + + ToGetCertificatesCertificateManagedOutput() GetCertificatesCertificateManagedOutput + ToGetCertificatesCertificateManagedOutputWithContext(context.Context) GetCertificatesCertificateManagedOutput +} + +type GetCertificatesCertificateManagedArgs struct { + // Detailed state of the latest authorization attempt for each domain + // specified for this Managed Certificate. + AuthorizationAttemptInfos GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayInput `pulumi:"authorizationAttemptInfos"` + // Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both. 
+ DnsAuthorizations pulumi.StringArrayInput `pulumi:"dnsAuthorizations"` + // The domains for which a managed SSL certificate will be generated. + // Wildcard domains are only supported with DNS challenge resolution + Domains pulumi.StringArrayInput `pulumi:"domains"` + // The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. + // If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. + // Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + IssuanceConfig pulumi.StringInput `pulumi:"issuanceConfig"` + // Information about issues with provisioning this Managed Certificate. + ProvisioningIssues GetCertificatesCertificateManagedProvisioningIssueArrayInput `pulumi:"provisioningIssues"` + // A state of this Managed Certificate. + State pulumi.StringInput `pulumi:"state"` +} + +func (GetCertificatesCertificateManagedArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesCertificateManaged)(nil)).Elem() +} + +func (i GetCertificatesCertificateManagedArgs) ToGetCertificatesCertificateManagedOutput() GetCertificatesCertificateManagedOutput { + return i.ToGetCertificatesCertificateManagedOutputWithContext(context.Background()) +} + +func (i GetCertificatesCertificateManagedArgs) ToGetCertificatesCertificateManagedOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCertificatesCertificateManagedOutput) +} + +// GetCertificatesCertificateManagedArrayInput is an input type that accepts GetCertificatesCertificateManagedArray and GetCertificatesCertificateManagedArrayOutput values. 
+// You can construct a concrete instance of `GetCertificatesCertificateManagedArrayInput` via: +// +// GetCertificatesCertificateManagedArray{ GetCertificatesCertificateManagedArgs{...} } +type GetCertificatesCertificateManagedArrayInput interface { + pulumi.Input + + ToGetCertificatesCertificateManagedArrayOutput() GetCertificatesCertificateManagedArrayOutput + ToGetCertificatesCertificateManagedArrayOutputWithContext(context.Context) GetCertificatesCertificateManagedArrayOutput +} + +type GetCertificatesCertificateManagedArray []GetCertificatesCertificateManagedInput + +func (GetCertificatesCertificateManagedArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCertificatesCertificateManaged)(nil)).Elem() +} + +func (i GetCertificatesCertificateManagedArray) ToGetCertificatesCertificateManagedArrayOutput() GetCertificatesCertificateManagedArrayOutput { + return i.ToGetCertificatesCertificateManagedArrayOutputWithContext(context.Background()) +} + +func (i GetCertificatesCertificateManagedArray) ToGetCertificatesCertificateManagedArrayOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCertificatesCertificateManagedArrayOutput) +} + +type GetCertificatesCertificateManagedOutput struct{ *pulumi.OutputState } + +func (GetCertificatesCertificateManagedOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesCertificateManaged)(nil)).Elem() +} + +func (o GetCertificatesCertificateManagedOutput) ToGetCertificatesCertificateManagedOutput() GetCertificatesCertificateManagedOutput { + return o +} + +func (o GetCertificatesCertificateManagedOutput) ToGetCertificatesCertificateManagedOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedOutput { + return o +} + +// Detailed state of the latest authorization attempt for each domain +// specified for this Managed Certificate. +func (o GetCertificatesCertificateManagedOutput) AuthorizationAttemptInfos() GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput { + return o.ApplyT(func(v GetCertificatesCertificateManaged) []GetCertificatesCertificateManagedAuthorizationAttemptInfo { + return v.AuthorizationAttemptInfos + }).(GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput) +} + +// Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both. +func (o GetCertificatesCertificateManagedOutput) DnsAuthorizations() pulumi.StringArrayOutput { + return o.ApplyT(func(v GetCertificatesCertificateManaged) []string { return v.DnsAuthorizations }).(pulumi.StringArrayOutput) +} + +// The domains for which a managed SSL certificate will be generated. +// Wildcard domains are only supported with DNS challenge resolution +func (o GetCertificatesCertificateManagedOutput) Domains() pulumi.StringArrayOutput { + return o.ApplyT(func(v GetCertificatesCertificateManaged) []string { return v.Domains }).(pulumi.StringArrayOutput) +} + +// The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. +// If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. +// Either issuanceConfig or dnsAuthorizations should be specificed, but not both. 
+func (o GetCertificatesCertificateManagedOutput) IssuanceConfig() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificateManaged) string { return v.IssuanceConfig }).(pulumi.StringOutput) +} + +// Information about issues with provisioning this Managed Certificate. +func (o GetCertificatesCertificateManagedOutput) ProvisioningIssues() GetCertificatesCertificateManagedProvisioningIssueArrayOutput { + return o.ApplyT(func(v GetCertificatesCertificateManaged) []GetCertificatesCertificateManagedProvisioningIssue { + return v.ProvisioningIssues + }).(GetCertificatesCertificateManagedProvisioningIssueArrayOutput) +} + +// A state of this Managed Certificate. +func (o GetCertificatesCertificateManagedOutput) State() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificateManaged) string { return v.State }).(pulumi.StringOutput) +} + +type GetCertificatesCertificateManagedArrayOutput struct{ *pulumi.OutputState } + +func (GetCertificatesCertificateManagedArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCertificatesCertificateManaged)(nil)).Elem() +} + +func (o GetCertificatesCertificateManagedArrayOutput) ToGetCertificatesCertificateManagedArrayOutput() GetCertificatesCertificateManagedArrayOutput { + return o +} + +func (o GetCertificatesCertificateManagedArrayOutput) ToGetCertificatesCertificateManagedArrayOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedArrayOutput { + return o +} + +func (o GetCertificatesCertificateManagedArrayOutput) Index(i pulumi.IntInput) GetCertificatesCertificateManagedOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetCertificatesCertificateManaged { + return vs[0].([]GetCertificatesCertificateManaged)[vs[1].(int)] + }).(GetCertificatesCertificateManagedOutput) +} + +type GetCertificatesCertificateManagedAuthorizationAttemptInfo struct { + // Human readable explanation for reaching the state. Provided to help + // address the configuration issues. + // Not guaranteed to be stable. For programmatic access use 'failure_reason' field. + Details string `pulumi:"details"` + // Domain name of the authorization attempt. + Domain string `pulumi:"domain"` + // Reason for failure of the authorization attempt for the domain. + FailureReason string `pulumi:"failureReason"` + // State of the domain for managed certificate issuance. + State string `pulumi:"state"` +} + +// GetCertificatesCertificateManagedAuthorizationAttemptInfoInput is an input type that accepts GetCertificatesCertificateManagedAuthorizationAttemptInfoArgs and GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput values. +// You can construct a concrete instance of `GetCertificatesCertificateManagedAuthorizationAttemptInfoInput` via: +// +// GetCertificatesCertificateManagedAuthorizationAttemptInfoArgs{...} +type GetCertificatesCertificateManagedAuthorizationAttemptInfoInput interface { + pulumi.Input + + ToGetCertificatesCertificateManagedAuthorizationAttemptInfoOutput() GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput + ToGetCertificatesCertificateManagedAuthorizationAttemptInfoOutputWithContext(context.Context) GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput +} + +type GetCertificatesCertificateManagedAuthorizationAttemptInfoArgs struct { + // Human readable explanation for reaching the state. Provided to help + // address the configuration issues. + // Not guaranteed to be stable. For programmatic access use 'failure_reason' field. 
+ Details pulumi.StringInput `pulumi:"details"` + // Domain name of the authorization attempt. + Domain pulumi.StringInput `pulumi:"domain"` + // Reason for failure of the authorization attempt for the domain. + FailureReason pulumi.StringInput `pulumi:"failureReason"` + // State of the domain for managed certificate issuance. + State pulumi.StringInput `pulumi:"state"` +} + +func (GetCertificatesCertificateManagedAuthorizationAttemptInfoArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesCertificateManagedAuthorizationAttemptInfo)(nil)).Elem() +} + +func (i GetCertificatesCertificateManagedAuthorizationAttemptInfoArgs) ToGetCertificatesCertificateManagedAuthorizationAttemptInfoOutput() GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput { + return i.ToGetCertificatesCertificateManagedAuthorizationAttemptInfoOutputWithContext(context.Background()) +} + +func (i GetCertificatesCertificateManagedAuthorizationAttemptInfoArgs) ToGetCertificatesCertificateManagedAuthorizationAttemptInfoOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput) +} + +// GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayInput is an input type that accepts GetCertificatesCertificateManagedAuthorizationAttemptInfoArray and GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput values. +// You can construct a concrete instance of `GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayInput` via: +// +// GetCertificatesCertificateManagedAuthorizationAttemptInfoArray{ GetCertificatesCertificateManagedAuthorizationAttemptInfoArgs{...} } +type GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayInput interface { + pulumi.Input + + ToGetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput() GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput + ToGetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutputWithContext(context.Context) GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput +} + +type GetCertificatesCertificateManagedAuthorizationAttemptInfoArray []GetCertificatesCertificateManagedAuthorizationAttemptInfoInput + +func (GetCertificatesCertificateManagedAuthorizationAttemptInfoArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCertificatesCertificateManagedAuthorizationAttemptInfo)(nil)).Elem() +} + +func (i GetCertificatesCertificateManagedAuthorizationAttemptInfoArray) ToGetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput() GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput { + return i.ToGetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutputWithContext(context.Background()) +} + +func (i GetCertificatesCertificateManagedAuthorizationAttemptInfoArray) ToGetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput) +} + +type GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput struct{ *pulumi.OutputState } + +func (GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesCertificateManagedAuthorizationAttemptInfo)(nil)).Elem() +} + +func (o 
GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput) ToGetCertificatesCertificateManagedAuthorizationAttemptInfoOutput() GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput { + return o +} + +func (o GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput) ToGetCertificatesCertificateManagedAuthorizationAttemptInfoOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput { + return o +} + +// Human readable explanation for reaching the state. Provided to help +// address the configuration issues. +// Not guaranteed to be stable. For programmatic access use 'failure_reason' field. +func (o GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput) Details() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificateManagedAuthorizationAttemptInfo) string { return v.Details }).(pulumi.StringOutput) +} + +// Domain name of the authorization attempt. +func (o GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput) Domain() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificateManagedAuthorizationAttemptInfo) string { return v.Domain }).(pulumi.StringOutput) +} + +// Reason for failure of the authorization attempt for the domain. +func (o GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput) FailureReason() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificateManagedAuthorizationAttemptInfo) string { return v.FailureReason }).(pulumi.StringOutput) +} + +// State of the domain for managed certificate issuance. +func (o GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput) State() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificateManagedAuthorizationAttemptInfo) string { return v.State }).(pulumi.StringOutput) +} + +type GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput struct{ *pulumi.OutputState } + +func (GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCertificatesCertificateManagedAuthorizationAttemptInfo)(nil)).Elem() +} + +func (o GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput) ToGetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput() GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput { + return o +} + +func (o GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput) ToGetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput { + return o +} + +func (o GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput) Index(i pulumi.IntInput) GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetCertificatesCertificateManagedAuthorizationAttemptInfo { + return vs[0].([]GetCertificatesCertificateManagedAuthorizationAttemptInfo)[vs[1].(int)] + }).(GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput) +} + +type GetCertificatesCertificateManagedProvisioningIssue struct { + // Human readable explanation about the issue. Provided to help address + // the configuration issues. + // Not guaranteed to be stable. For programmatic access use 'reason' field. + Details string `pulumi:"details"` + // Reason for provisioning failures. 
+ Reason string `pulumi:"reason"` +} + +// GetCertificatesCertificateManagedProvisioningIssueInput is an input type that accepts GetCertificatesCertificateManagedProvisioningIssueArgs and GetCertificatesCertificateManagedProvisioningIssueOutput values. +// You can construct a concrete instance of `GetCertificatesCertificateManagedProvisioningIssueInput` via: +// +// GetCertificatesCertificateManagedProvisioningIssueArgs{...} +type GetCertificatesCertificateManagedProvisioningIssueInput interface { + pulumi.Input + + ToGetCertificatesCertificateManagedProvisioningIssueOutput() GetCertificatesCertificateManagedProvisioningIssueOutput + ToGetCertificatesCertificateManagedProvisioningIssueOutputWithContext(context.Context) GetCertificatesCertificateManagedProvisioningIssueOutput +} + +type GetCertificatesCertificateManagedProvisioningIssueArgs struct { + // Human readable explanation about the issue. Provided to help address + // the configuration issues. + // Not guaranteed to be stable. For programmatic access use 'reason' field. + Details pulumi.StringInput `pulumi:"details"` + // Reason for provisioning failures. + Reason pulumi.StringInput `pulumi:"reason"` +} + +func (GetCertificatesCertificateManagedProvisioningIssueArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesCertificateManagedProvisioningIssue)(nil)).Elem() +} + +func (i GetCertificatesCertificateManagedProvisioningIssueArgs) ToGetCertificatesCertificateManagedProvisioningIssueOutput() GetCertificatesCertificateManagedProvisioningIssueOutput { + return i.ToGetCertificatesCertificateManagedProvisioningIssueOutputWithContext(context.Background()) +} + +func (i GetCertificatesCertificateManagedProvisioningIssueArgs) ToGetCertificatesCertificateManagedProvisioningIssueOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedProvisioningIssueOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCertificatesCertificateManagedProvisioningIssueOutput) +} + +// GetCertificatesCertificateManagedProvisioningIssueArrayInput is an input type that accepts GetCertificatesCertificateManagedProvisioningIssueArray and GetCertificatesCertificateManagedProvisioningIssueArrayOutput values. 
+// You can construct a concrete instance of `GetCertificatesCertificateManagedProvisioningIssueArrayInput` via: +// +// GetCertificatesCertificateManagedProvisioningIssueArray{ GetCertificatesCertificateManagedProvisioningIssueArgs{...} } +type GetCertificatesCertificateManagedProvisioningIssueArrayInput interface { + pulumi.Input + + ToGetCertificatesCertificateManagedProvisioningIssueArrayOutput() GetCertificatesCertificateManagedProvisioningIssueArrayOutput + ToGetCertificatesCertificateManagedProvisioningIssueArrayOutputWithContext(context.Context) GetCertificatesCertificateManagedProvisioningIssueArrayOutput +} + +type GetCertificatesCertificateManagedProvisioningIssueArray []GetCertificatesCertificateManagedProvisioningIssueInput + +func (GetCertificatesCertificateManagedProvisioningIssueArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCertificatesCertificateManagedProvisioningIssue)(nil)).Elem() +} + +func (i GetCertificatesCertificateManagedProvisioningIssueArray) ToGetCertificatesCertificateManagedProvisioningIssueArrayOutput() GetCertificatesCertificateManagedProvisioningIssueArrayOutput { + return i.ToGetCertificatesCertificateManagedProvisioningIssueArrayOutputWithContext(context.Background()) +} + +func (i GetCertificatesCertificateManagedProvisioningIssueArray) ToGetCertificatesCertificateManagedProvisioningIssueArrayOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedProvisioningIssueArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCertificatesCertificateManagedProvisioningIssueArrayOutput) +} + +type GetCertificatesCertificateManagedProvisioningIssueOutput struct{ *pulumi.OutputState } + +func (GetCertificatesCertificateManagedProvisioningIssueOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCertificatesCertificateManagedProvisioningIssue)(nil)).Elem() +} + +func (o GetCertificatesCertificateManagedProvisioningIssueOutput) ToGetCertificatesCertificateManagedProvisioningIssueOutput() GetCertificatesCertificateManagedProvisioningIssueOutput { + return o +} + +func (o GetCertificatesCertificateManagedProvisioningIssueOutput) ToGetCertificatesCertificateManagedProvisioningIssueOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedProvisioningIssueOutput { + return o +} + +// Human readable explanation about the issue. Provided to help address +// the configuration issues. +// Not guaranteed to be stable. For programmatic access use 'reason' field. +func (o GetCertificatesCertificateManagedProvisioningIssueOutput) Details() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificateManagedProvisioningIssue) string { return v.Details }).(pulumi.StringOutput) +} + +// Reason for provisioning failures. 
+func (o GetCertificatesCertificateManagedProvisioningIssueOutput) Reason() pulumi.StringOutput { + return o.ApplyT(func(v GetCertificatesCertificateManagedProvisioningIssue) string { return v.Reason }).(pulumi.StringOutput) +} + +type GetCertificatesCertificateManagedProvisioningIssueArrayOutput struct{ *pulumi.OutputState } + +func (GetCertificatesCertificateManagedProvisioningIssueArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCertificatesCertificateManagedProvisioningIssue)(nil)).Elem() +} + +func (o GetCertificatesCertificateManagedProvisioningIssueArrayOutput) ToGetCertificatesCertificateManagedProvisioningIssueArrayOutput() GetCertificatesCertificateManagedProvisioningIssueArrayOutput { + return o +} + +func (o GetCertificatesCertificateManagedProvisioningIssueArrayOutput) ToGetCertificatesCertificateManagedProvisioningIssueArrayOutputWithContext(ctx context.Context) GetCertificatesCertificateManagedProvisioningIssueArrayOutput { + return o +} + +func (o GetCertificatesCertificateManagedProvisioningIssueArrayOutput) Index(i pulumi.IntInput) GetCertificatesCertificateManagedProvisioningIssueOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetCertificatesCertificateManagedProvisioningIssue { + return vs[0].([]GetCertificatesCertificateManagedProvisioningIssue)[vs[1].(int)] + }).(GetCertificatesCertificateManagedProvisioningIssueOutput) +} + func init() { pulumi.RegisterInputType(reflect.TypeOf((*CertificateIssuanceConfigCertificateAuthorityConfigInput)(nil)).Elem(), CertificateIssuanceConfigCertificateAuthorityConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*CertificateIssuanceConfigCertificateAuthorityConfigPtrInput)(nil)).Elem(), CertificateIssuanceConfigCertificateAuthorityConfigArgs{}) @@ -2155,6 +2785,14 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetCertificateMapGclbTargetArrayInput)(nil)).Elem(), GetCertificateMapGclbTargetArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetCertificateMapGclbTargetIpConfigInput)(nil)).Elem(), GetCertificateMapGclbTargetIpConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetCertificateMapGclbTargetIpConfigArrayInput)(nil)).Elem(), GetCertificateMapGclbTargetIpConfigArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCertificatesCertificateInput)(nil)).Elem(), GetCertificatesCertificateArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCertificatesCertificateArrayInput)(nil)).Elem(), GetCertificatesCertificateArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCertificatesCertificateManagedInput)(nil)).Elem(), GetCertificatesCertificateManagedArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCertificatesCertificateManagedArrayInput)(nil)).Elem(), GetCertificatesCertificateManagedArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCertificatesCertificateManagedAuthorizationAttemptInfoInput)(nil)).Elem(), GetCertificatesCertificateManagedAuthorizationAttemptInfoArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayInput)(nil)).Elem(), GetCertificatesCertificateManagedAuthorizationAttemptInfoArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCertificatesCertificateManagedProvisioningIssueInput)(nil)).Elem(), GetCertificatesCertificateManagedProvisioningIssueArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCertificatesCertificateManagedProvisioningIssueArrayInput)(nil)).Elem(), GetCertificatesCertificateManagedProvisioningIssueArray{}) 
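
// Usage sketch for the new GetCertificates data source types registered above. This is an
// illustrative example, not generated code: it assumes the standard generated invoke
// signature certificatemanager.GetCertificates(ctx, *GetCertificatesArgs, ...pulumi.InvokeOption),
// that Filter and Region are the data source arguments, and the v8 Go module import path;
// the region value is a placeholder.
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/certificatemanager"
//		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
//	)
//
//	func main() {
//		pulumi.Run(func(ctx *pulumi.Context) error {
//			// Look up certificates in one region and report managed-certificate health.
//			certs, err := certificatemanager.GetCertificates(ctx, &certificatemanager.GetCertificatesArgs{
//				Region: pulumi.StringRef("us-central1"),
//			})
//			if err != nil {
//				return err
//			}
//			for _, cert := range certs.Certificates {
//				for _, managed := range cert.Manageds {
//					fmt.Printf("%s: state=%s domains=%v\n", cert.Name, managed.State, managed.Domains)
//					for _, issue := range managed.ProvisioningIssues {
//						fmt.Printf("  provisioning issue: %s (%s)\n", issue.Reason, issue.Details)
//					}
//				}
//			}
//			return nil
//		})
//	}
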
pulumi.RegisterOutputType(CertificateIssuanceConfigCertificateAuthorityConfigOutput{}) pulumi.RegisterOutputType(CertificateIssuanceConfigCertificateAuthorityConfigPtrOutput{}) pulumi.RegisterOutputType(CertificateIssuanceConfigCertificateAuthorityConfigCertificateAuthorityServiceConfigOutput{}) @@ -2185,4 +2823,12 @@ func init() { pulumi.RegisterOutputType(GetCertificateMapGclbTargetArrayOutput{}) pulumi.RegisterOutputType(GetCertificateMapGclbTargetIpConfigOutput{}) pulumi.RegisterOutputType(GetCertificateMapGclbTargetIpConfigArrayOutput{}) + pulumi.RegisterOutputType(GetCertificatesCertificateOutput{}) + pulumi.RegisterOutputType(GetCertificatesCertificateArrayOutput{}) + pulumi.RegisterOutputType(GetCertificatesCertificateManagedOutput{}) + pulumi.RegisterOutputType(GetCertificatesCertificateManagedArrayOutput{}) + pulumi.RegisterOutputType(GetCertificatesCertificateManagedAuthorizationAttemptInfoOutput{}) + pulumi.RegisterOutputType(GetCertificatesCertificateManagedAuthorizationAttemptInfoArrayOutput{}) + pulumi.RegisterOutputType(GetCertificatesCertificateManagedProvisioningIssueOutput{}) + pulumi.RegisterOutputType(GetCertificatesCertificateManagedProvisioningIssueArrayOutput{}) } diff --git a/sdk/go/gcp/cloudbuild/pulumiTypes.go b/sdk/go/gcp/cloudbuild/pulumiTypes.go index e98195ea65..27a73eb88c 100644 --- a/sdk/go/gcp/cloudbuild/pulumiTypes.go +++ b/sdk/go/gcp/cloudbuild/pulumiTypes.go @@ -6946,9 +6946,9 @@ func (o WorkerPoolNetworkConfigPtrOutput) PeeredNetworkIpRange() pulumi.StringPt } type WorkerPoolWorkerConfig struct { - // Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + // Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. DiskSizeGb *int `pulumi:"diskSizeGb"` - // Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + // Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. MachineType *string `pulumi:"machineType"` // If true, workers are created without any public address, which prevents network egress to public IPs. NoExternalIp *bool `pulumi:"noExternalIp"` @@ -6966,9 +6966,9 @@ type WorkerPoolWorkerConfigInput interface { } type WorkerPoolWorkerConfigArgs struct { - // Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + // Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. DiskSizeGb pulumi.IntPtrInput `pulumi:"diskSizeGb"` - // Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). 
If left blank, Cloud Build will use `n1-standard-1`. + // Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. MachineType pulumi.StringPtrInput `pulumi:"machineType"` // If true, workers are created without any public address, which prevents network egress to public IPs. NoExternalIp pulumi.BoolPtrInput `pulumi:"noExternalIp"` @@ -7051,12 +7051,12 @@ func (o WorkerPoolWorkerConfigOutput) ToWorkerPoolWorkerConfigPtrOutputWithConte }).(WorkerPoolWorkerConfigPtrOutput) } -// Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. +// Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. func (o WorkerPoolWorkerConfigOutput) DiskSizeGb() pulumi.IntPtrOutput { return o.ApplyT(func(v WorkerPoolWorkerConfig) *int { return v.DiskSizeGb }).(pulumi.IntPtrOutput) } -// Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. +// Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. func (o WorkerPoolWorkerConfigOutput) MachineType() pulumi.StringPtrOutput { return o.ApplyT(func(v WorkerPoolWorkerConfig) *string { return v.MachineType }).(pulumi.StringPtrOutput) } @@ -7090,7 +7090,7 @@ func (o WorkerPoolWorkerConfigPtrOutput) Elem() WorkerPoolWorkerConfigOutput { }).(WorkerPoolWorkerConfigOutput) } -// Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. +// Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. func (o WorkerPoolWorkerConfigPtrOutput) DiskSizeGb() pulumi.IntPtrOutput { return o.ApplyT(func(v *WorkerPoolWorkerConfig) *int { if v == nil { @@ -7100,7 +7100,7 @@ func (o WorkerPoolWorkerConfigPtrOutput) DiskSizeGb() pulumi.IntPtrOutput { }).(pulumi.IntPtrOutput) } -// Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. +// Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. 
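
// Configuration sketch for the worker pool fields documented above (illustrative only; the
// resource-level field names are assumed from the generated SDK, the location and machine
// type are placeholders, and the snippet is meant to run inside a pulumi.Run callback).
//
//	_, err := cloudbuild.NewWorkerPool(ctx, "pool", &cloudbuild.WorkerPoolArgs{
//		Location: pulumi.String("europe-west1"),
//		WorkerConfig: &cloudbuild.WorkerPoolWorkerConfigArgs{
//			DiskSizeGb:   pulumi.Int(100),                // up to 1000; 0 selects the standard disk size
//			MachineType:  pulumi.String("e2-standard-4"), // placeholder; omit to use the default machine type
//			NoExternalIp: pulumi.Bool(true),              // workers get no public IP
//		},
//	})
//	if err != nil {
//		return err
//	}
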
func (o WorkerPoolWorkerConfigPtrOutput) MachineType() pulumi.StringPtrOutput { return o.ApplyT(func(v *WorkerPoolWorkerConfig) *string { if v == nil { diff --git a/sdk/go/gcp/cloudrun/pulumiTypes.go b/sdk/go/gcp/cloudrun/pulumiTypes.go index 2898252b1f..2c63caf024 100644 --- a/sdk/go/gcp/cloudrun/pulumiTypes.go +++ b/sdk/go/gcp/cloudrun/pulumiTypes.go @@ -6676,8 +6676,7 @@ type ServiceTemplateSpecVolume struct { // Volume's name. Name string `pulumi:"name"` // A filesystem backed by a Network File System share. This filesystem requires the - // run.googleapis.com/execution-environment annotation to be set to "gen2" and - // run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" // Structure is documented below. Nfs *ServiceTemplateSpecVolumeNfs `pulumi:"nfs"` // The secret's value will be presented as the content of a file whose @@ -6708,8 +6707,7 @@ type ServiceTemplateSpecVolumeArgs struct { // Volume's name. Name pulumi.StringInput `pulumi:"name"` // A filesystem backed by a Network File System share. This filesystem requires the - // run.googleapis.com/execution-environment annotation to be set to "gen2" and - // run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" // Structure is documented below. Nfs ServiceTemplateSpecVolumeNfsPtrInput `pulumi:"nfs"` // The secret's value will be presented as the content of a file whose @@ -6788,8 +6786,7 @@ func (o ServiceTemplateSpecVolumeOutput) Name() pulumi.StringOutput { } // A filesystem backed by a Network File System share. This filesystem requires the -// run.googleapis.com/execution-environment annotation to be set to "gen2" and -// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". +// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" // Structure is documented below. func (o ServiceTemplateSpecVolumeOutput) Nfs() ServiceTemplateSpecVolumeNfsPtrOutput { return o.ApplyT(func(v ServiceTemplateSpecVolume) *ServiceTemplateSpecVolumeNfs { return v.Nfs }).(ServiceTemplateSpecVolumeNfsPtrOutput) @@ -6826,8 +6823,7 @@ func (o ServiceTemplateSpecVolumeArrayOutput) Index(i pulumi.IntInput) ServiceTe type ServiceTemplateSpecVolumeCsi struct { // Unique name representing the type of file system to be created. Cloud Run supports the following values: // * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - // run.googleapis.com/execution-environment annotation to be set to "gen2" and - // run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Driver string `pulumi:"driver"` // If true, all mounts created from this volume will be read-only. ReadOnly *bool `pulumi:"readOnly"` @@ -6851,8 +6847,7 @@ type ServiceTemplateSpecVolumeCsiInput interface { type ServiceTemplateSpecVolumeCsiArgs struct { // Unique name representing the type of file system to be created. Cloud Run supports the following values: // * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - // run.googleapis.com/execution-environment annotation to be set to "gen2" and - // run.googleapis.com/launch-stage set to "BETA" or "ALPHA". 
+ // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Driver pulumi.StringInput `pulumi:"driver"` // If true, all mounts created from this volume will be read-only. ReadOnly pulumi.BoolPtrInput `pulumi:"readOnly"` @@ -6941,8 +6936,7 @@ func (o ServiceTemplateSpecVolumeCsiOutput) ToServiceTemplateSpecVolumeCsiPtrOut // Unique name representing the type of file system to be created. Cloud Run supports the following values: // - gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the -// run.googleapis.com/execution-environment annotation to be set to "gen2" and -// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". +// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" func (o ServiceTemplateSpecVolumeCsiOutput) Driver() pulumi.StringOutput { return o.ApplyT(func(v ServiceTemplateSpecVolumeCsi) string { return v.Driver }).(pulumi.StringOutput) } @@ -6985,8 +6979,7 @@ func (o ServiceTemplateSpecVolumeCsiPtrOutput) Elem() ServiceTemplateSpecVolumeC // Unique name representing the type of file system to be created. Cloud Run supports the following values: // - gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the -// run.googleapis.com/execution-environment annotation to be set to "gen2" and -// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". +// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" func (o ServiceTemplateSpecVolumeCsiPtrOutput) Driver() pulumi.StringPtrOutput { return o.ApplyT(func(v *ServiceTemplateSpecVolumeCsi) *string { if v == nil { @@ -11573,8 +11566,7 @@ type GetServiceTemplateSpecVolume struct { // The name of the Cloud Run Service. Name string `pulumi:"name"` // A filesystem backed by a Network File System share. This filesystem requires the - // run.googleapis.com/execution-environment annotation to be set to "gen2" and - // run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Nfs []GetServiceTemplateSpecVolumeNf `pulumi:"nfs"` // The secret's value will be presented as the content of a file whose // name is defined in the item path. If no items are defined, the name of @@ -11601,8 +11593,7 @@ type GetServiceTemplateSpecVolumeArgs struct { // The name of the Cloud Run Service. Name pulumi.StringInput `pulumi:"name"` // A filesystem backed by a Network File System share. This filesystem requires the - // run.googleapis.com/execution-environment annotation to be set to "gen2" and - // run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Nfs GetServiceTemplateSpecVolumeNfArrayInput `pulumi:"nfs"` // The secret's value will be presented as the content of a file whose // name is defined in the item path. If no items are defined, the name of @@ -11677,8 +11668,7 @@ func (o GetServiceTemplateSpecVolumeOutput) Name() pulumi.StringOutput { } // A filesystem backed by a Network File System share. This filesystem requires the -// run.googleapis.com/execution-environment annotation to be set to "gen2" and -// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". 
+// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" func (o GetServiceTemplateSpecVolumeOutput) Nfs() GetServiceTemplateSpecVolumeNfArrayOutput { return o.ApplyT(func(v GetServiceTemplateSpecVolume) []GetServiceTemplateSpecVolumeNf { return v.Nfs }).(GetServiceTemplateSpecVolumeNfArrayOutput) } @@ -11713,8 +11703,7 @@ func (o GetServiceTemplateSpecVolumeArrayOutput) Index(i pulumi.IntInput) GetSer type GetServiceTemplateSpecVolumeCsi struct { // Unique name representing the type of file system to be created. Cloud Run supports the following values: // * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - // run.googleapis.com/execution-environment annotation to be set to "gen2" and - // run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Driver string `pulumi:"driver"` // If true, all mounts created from this volume will be read-only. ReadOnly bool `pulumi:"readOnly"` @@ -11738,8 +11727,7 @@ type GetServiceTemplateSpecVolumeCsiInput interface { type GetServiceTemplateSpecVolumeCsiArgs struct { // Unique name representing the type of file system to be created. Cloud Run supports the following values: // * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - // run.googleapis.com/execution-environment annotation to be set to "gen2" and - // run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + // run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Driver pulumi.StringInput `pulumi:"driver"` // If true, all mounts created from this volume will be read-only. ReadOnly pulumi.BoolInput `pulumi:"readOnly"` @@ -11802,8 +11790,7 @@ func (o GetServiceTemplateSpecVolumeCsiOutput) ToGetServiceTemplateSpecVolumeCsi // Unique name representing the type of file system to be created. Cloud Run supports the following values: // - gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the -// run.googleapis.com/execution-environment annotation to be set to "gen2" and -// run.googleapis.com/launch-stage set to "BETA" or "ALPHA". +// run.googleapis.com/execution-environment annotation to be unset or set to "gen2" func (o GetServiceTemplateSpecVolumeCsiOutput) Driver() pulumi.StringOutput { return o.ApplyT(func(v GetServiceTemplateSpecVolumeCsi) string { return v.Driver }).(pulumi.StringOutput) } diff --git a/sdk/go/gcp/cloudrunv2/pulumiTypes.go b/sdk/go/gcp/cloudrunv2/pulumiTypes.go index d375d6ccec..8c3541ac8f 100644 --- a/sdk/go/gcp/cloudrunv2/pulumiTypes.go +++ b/sdk/go/gcp/cloudrunv2/pulumiTypes.go @@ -2322,12 +2322,12 @@ type JobTemplateTemplateVolume struct { // Ephemeral storage used as a shared volume. // Structure is documented below. EmptyDir *JobTemplateTemplateVolumeEmptyDir `pulumi:"emptyDir"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + // Cloud Storage bucket mounted as a volume using GCSFuse. // Structure is documented below. Gcs *JobTemplateTemplateVolumeGcs `pulumi:"gcs"` // Volume's name. Name string `pulumi:"name"` - // NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + // NFS share mounted as a volume. // Structure is documented below. Nfs *JobTemplateTemplateVolumeNfs `pulumi:"nfs"` // Secret represents a secret that should populate this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret @@ -2353,12 +2353,12 @@ type JobTemplateTemplateVolumeArgs struct { // Ephemeral storage used as a shared volume. // Structure is documented below. EmptyDir JobTemplateTemplateVolumeEmptyDirPtrInput `pulumi:"emptyDir"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + // Cloud Storage bucket mounted as a volume using GCSFuse. // Structure is documented below. Gcs JobTemplateTemplateVolumeGcsPtrInput `pulumi:"gcs"` // Volume's name. Name pulumi.StringInput `pulumi:"name"` - // NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + // NFS share mounted as a volume. // Structure is documented below. Nfs JobTemplateTemplateVolumeNfsPtrInput `pulumi:"nfs"` // Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret @@ -2431,7 +2431,7 @@ func (o JobTemplateTemplateVolumeOutput) EmptyDir() JobTemplateTemplateVolumeEmp return o.ApplyT(func(v JobTemplateTemplateVolume) *JobTemplateTemplateVolumeEmptyDir { return v.EmptyDir }).(JobTemplateTemplateVolumeEmptyDirPtrOutput) } -// Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. +// Cloud Storage bucket mounted as a volume using GCSFuse. // Structure is documented below. func (o JobTemplateTemplateVolumeOutput) Gcs() JobTemplateTemplateVolumeGcsPtrOutput { return o.ApplyT(func(v JobTemplateTemplateVolume) *JobTemplateTemplateVolumeGcs { return v.Gcs }).(JobTemplateTemplateVolumeGcsPtrOutput) @@ -2442,7 +2442,7 @@ func (o JobTemplateTemplateVolumeOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v JobTemplateTemplateVolume) string { return v.Name }).(pulumi.StringOutput) } -// NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. +// NFS share mounted as a volume. // Structure is documented below. func (o JobTemplateTemplateVolumeOutput) Nfs() JobTemplateTemplateVolumeNfsPtrOutput { return o.ApplyT(func(v JobTemplateTemplateVolume) *JobTemplateTemplateVolumeNfs { return v.Nfs }).(JobTemplateTemplateVolumeNfsPtrOutput) @@ -4759,6 +4759,9 @@ type ServiceTemplate struct { Scaling *ServiceTemplateScaling `pulumi:"scaling"` // Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. ServiceAccount *string `pulumi:"serviceAccount"` + // Enables Cloud Service Mesh for this Revision. + // Structure is documented below. + ServiceMesh *ServiceTemplateServiceMesh `pulumi:"serviceMesh"` // Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity SessionAffinity *bool `pulumi:"sessionAffinity"` // Max allowed time for an instance to respond to a request. @@ -4812,6 +4815,9 @@ type ServiceTemplateArgs struct { Scaling ServiceTemplateScalingPtrInput `pulumi:"scaling"` // Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. 
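
// Usage sketch for the volume fields documented above: a Cloud Run v2 job mounting a Cloud
// Storage bucket via GCSFuse, which no longer requires an ALPHA/BETA launch stage. Illustrative
// only; the nested type names follow the generated naming pattern in this file, the image,
// bucket, and mount path are placeholders, and the snippet assumes a pulumi.Run callback.
//
//	_, err := cloudrunv2.NewJob(ctx, "job", &cloudrunv2.JobArgs{
//		Location: pulumi.String("us-central1"),
//		Template: &cloudrunv2.JobTemplateArgs{
//			Template: &cloudrunv2.JobTemplateTemplateArgs{
//				Containers: cloudrunv2.JobTemplateTemplateContainerArray{
//					&cloudrunv2.JobTemplateTemplateContainerArgs{
//						Image: pulumi.String("us-docker.pkg.dev/cloudrun/container/job"),
//						VolumeMounts: cloudrunv2.JobTemplateTemplateContainerVolumeMountArray{
//							&cloudrunv2.JobTemplateTemplateContainerVolumeMountArgs{
//								Name:      pulumi.String("assets"),
//								MountPath: pulumi.String("/mnt/assets"),
//							},
//						},
//					},
//				},
//				Volumes: cloudrunv2.JobTemplateTemplateVolumeArray{
//					&cloudrunv2.JobTemplateTemplateVolumeArgs{
//						Name: pulumi.String("assets"),
//						Gcs: &cloudrunv2.JobTemplateTemplateVolumeGcsArgs{
//							Bucket:   pulumi.String("my-bucket"),
//							ReadOnly: pulumi.Bool(true),
//						},
//					},
//				},
//			},
//		},
//	})
//	if err != nil {
//		return err
//	}
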
ServiceAccount pulumi.StringPtrInput `pulumi:"serviceAccount"` + // Enables Cloud Service Mesh for this Revision. + // Structure is documented below. + ServiceMesh ServiceTemplateServiceMeshPtrInput `pulumi:"serviceMesh"` // Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity SessionAffinity pulumi.BoolPtrInput `pulumi:"sessionAffinity"` // Max allowed time for an instance to respond to a request. @@ -4957,6 +4963,12 @@ func (o ServiceTemplateOutput) ServiceAccount() pulumi.StringPtrOutput { return o.ApplyT(func(v ServiceTemplate) *string { return v.ServiceAccount }).(pulumi.StringPtrOutput) } +// Enables Cloud Service Mesh for this Revision. +// Structure is documented below. +func (o ServiceTemplateOutput) ServiceMesh() ServiceTemplateServiceMeshPtrOutput { + return o.ApplyT(func(v ServiceTemplate) *ServiceTemplateServiceMesh { return v.ServiceMesh }).(ServiceTemplateServiceMeshPtrOutput) +} + // Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity func (o ServiceTemplateOutput) SessionAffinity() pulumi.BoolPtrOutput { return o.ApplyT(func(v ServiceTemplate) *bool { return v.SessionAffinity }).(pulumi.BoolPtrOutput) @@ -5104,6 +5116,17 @@ func (o ServiceTemplatePtrOutput) ServiceAccount() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } +// Enables Cloud Service Mesh for this Revision. +// Structure is documented below. +func (o ServiceTemplatePtrOutput) ServiceMesh() ServiceTemplateServiceMeshPtrOutput { + return o.ApplyT(func(v *ServiceTemplate) *ServiceTemplateServiceMesh { + if v == nil { + return nil + } + return v.ServiceMesh + }).(ServiceTemplateServiceMeshPtrOutput) +} + // Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity func (o ServiceTemplatePtrOutput) SessionAffinity() pulumi.BoolPtrOutput { return o.ApplyT(func(v *ServiceTemplate) *bool { @@ -8114,6 +8137,151 @@ func (o ServiceTemplateScalingPtrOutput) MinInstanceCount() pulumi.IntPtrOutput }).(pulumi.IntPtrOutput) } +type ServiceTemplateServiceMesh struct { + // The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + // + // *** + Mesh *string `pulumi:"mesh"` +} + +// ServiceTemplateServiceMeshInput is an input type that accepts ServiceTemplateServiceMeshArgs and ServiceTemplateServiceMeshOutput values. +// You can construct a concrete instance of `ServiceTemplateServiceMeshInput` via: +// +// ServiceTemplateServiceMeshArgs{...} +type ServiceTemplateServiceMeshInput interface { + pulumi.Input + + ToServiceTemplateServiceMeshOutput() ServiceTemplateServiceMeshOutput + ToServiceTemplateServiceMeshOutputWithContext(context.Context) ServiceTemplateServiceMeshOutput +} + +type ServiceTemplateServiceMeshArgs struct { + // The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. 
+ // + // *** + Mesh pulumi.StringPtrInput `pulumi:"mesh"` +} + +func (ServiceTemplateServiceMeshArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ServiceTemplateServiceMesh)(nil)).Elem() +} + +func (i ServiceTemplateServiceMeshArgs) ToServiceTemplateServiceMeshOutput() ServiceTemplateServiceMeshOutput { + return i.ToServiceTemplateServiceMeshOutputWithContext(context.Background()) +} + +func (i ServiceTemplateServiceMeshArgs) ToServiceTemplateServiceMeshOutputWithContext(ctx context.Context) ServiceTemplateServiceMeshOutput { + return pulumi.ToOutputWithContext(ctx, i).(ServiceTemplateServiceMeshOutput) +} + +func (i ServiceTemplateServiceMeshArgs) ToServiceTemplateServiceMeshPtrOutput() ServiceTemplateServiceMeshPtrOutput { + return i.ToServiceTemplateServiceMeshPtrOutputWithContext(context.Background()) +} + +func (i ServiceTemplateServiceMeshArgs) ToServiceTemplateServiceMeshPtrOutputWithContext(ctx context.Context) ServiceTemplateServiceMeshPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ServiceTemplateServiceMeshOutput).ToServiceTemplateServiceMeshPtrOutputWithContext(ctx) +} + +// ServiceTemplateServiceMeshPtrInput is an input type that accepts ServiceTemplateServiceMeshArgs, ServiceTemplateServiceMeshPtr and ServiceTemplateServiceMeshPtrOutput values. +// You can construct a concrete instance of `ServiceTemplateServiceMeshPtrInput` via: +// +// ServiceTemplateServiceMeshArgs{...} +// +// or: +// +// nil +type ServiceTemplateServiceMeshPtrInput interface { + pulumi.Input + + ToServiceTemplateServiceMeshPtrOutput() ServiceTemplateServiceMeshPtrOutput + ToServiceTemplateServiceMeshPtrOutputWithContext(context.Context) ServiceTemplateServiceMeshPtrOutput +} + +type serviceTemplateServiceMeshPtrType ServiceTemplateServiceMeshArgs + +func ServiceTemplateServiceMeshPtr(v *ServiceTemplateServiceMeshArgs) ServiceTemplateServiceMeshPtrInput { + return (*serviceTemplateServiceMeshPtrType)(v) +} + +func (*serviceTemplateServiceMeshPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**ServiceTemplateServiceMesh)(nil)).Elem() +} + +func (i *serviceTemplateServiceMeshPtrType) ToServiceTemplateServiceMeshPtrOutput() ServiceTemplateServiceMeshPtrOutput { + return i.ToServiceTemplateServiceMeshPtrOutputWithContext(context.Background()) +} + +func (i *serviceTemplateServiceMeshPtrType) ToServiceTemplateServiceMeshPtrOutputWithContext(ctx context.Context) ServiceTemplateServiceMeshPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ServiceTemplateServiceMeshPtrOutput) +} + +type ServiceTemplateServiceMeshOutput struct{ *pulumi.OutputState } + +func (ServiceTemplateServiceMeshOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ServiceTemplateServiceMesh)(nil)).Elem() +} + +func (o ServiceTemplateServiceMeshOutput) ToServiceTemplateServiceMeshOutput() ServiceTemplateServiceMeshOutput { + return o +} + +func (o ServiceTemplateServiceMeshOutput) ToServiceTemplateServiceMeshOutputWithContext(ctx context.Context) ServiceTemplateServiceMeshOutput { + return o +} + +func (o ServiceTemplateServiceMeshOutput) ToServiceTemplateServiceMeshPtrOutput() ServiceTemplateServiceMeshPtrOutput { + return o.ToServiceTemplateServiceMeshPtrOutputWithContext(context.Background()) +} + +func (o ServiceTemplateServiceMeshOutput) ToServiceTemplateServiceMeshPtrOutputWithContext(ctx context.Context) ServiceTemplateServiceMeshPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v ServiceTemplateServiceMesh) *ServiceTemplateServiceMesh { + return &v + 
}).(ServiceTemplateServiceMeshPtrOutput) +} + +// The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. +// +// *** +func (o ServiceTemplateServiceMeshOutput) Mesh() pulumi.StringPtrOutput { + return o.ApplyT(func(v ServiceTemplateServiceMesh) *string { return v.Mesh }).(pulumi.StringPtrOutput) +} + +type ServiceTemplateServiceMeshPtrOutput struct{ *pulumi.OutputState } + +func (ServiceTemplateServiceMeshPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ServiceTemplateServiceMesh)(nil)).Elem() +} + +func (o ServiceTemplateServiceMeshPtrOutput) ToServiceTemplateServiceMeshPtrOutput() ServiceTemplateServiceMeshPtrOutput { + return o +} + +func (o ServiceTemplateServiceMeshPtrOutput) ToServiceTemplateServiceMeshPtrOutputWithContext(ctx context.Context) ServiceTemplateServiceMeshPtrOutput { + return o +} + +func (o ServiceTemplateServiceMeshPtrOutput) Elem() ServiceTemplateServiceMeshOutput { + return o.ApplyT(func(v *ServiceTemplateServiceMesh) ServiceTemplateServiceMesh { + if v != nil { + return *v + } + var ret ServiceTemplateServiceMesh + return ret + }).(ServiceTemplateServiceMeshOutput) +} + +// The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. +// +// *** +func (o ServiceTemplateServiceMeshPtrOutput) Mesh() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ServiceTemplateServiceMesh) *string { + if v == nil { + return nil + } + return v.Mesh + }).(pulumi.StringPtrOutput) +} + type ServiceTemplateVolume struct { // For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. // Structure is documented below. @@ -8121,7 +8289,7 @@ type ServiceTemplateVolume struct { // Ephemeral storage used as a shared volume. // Structure is documented below. EmptyDir *ServiceTemplateVolumeEmptyDir `pulumi:"emptyDir"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. // Structure is documented below. Gcs *ServiceTemplateVolumeGcs `pulumi:"gcs"` // Volume's name. @@ -8152,7 +8320,7 @@ type ServiceTemplateVolumeArgs struct { // Ephemeral storage used as a shared volume. // Structure is documented below. EmptyDir ServiceTemplateVolumeEmptyDirPtrInput `pulumi:"emptyDir"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. // Structure is documented below. Gcs ServiceTemplateVolumeGcsPtrInput `pulumi:"gcs"` // Volume's name. @@ -8228,7 +8396,7 @@ func (o ServiceTemplateVolumeOutput) EmptyDir() ServiceTemplateVolumeEmptyDirPtr return o.ApplyT(func(v ServiceTemplateVolume) *ServiceTemplateVolumeEmptyDir { return v.EmptyDir }).(ServiceTemplateVolumeEmptyDirPtrOutput) } -// Cloud Storage bucket mounted as a volume using GCSFuse. 
This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. +// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. // Structure is documented below. func (o ServiceTemplateVolumeOutput) Gcs() ServiceTemplateVolumeGcsPtrOutput { return o.ApplyT(func(v ServiceTemplateVolume) *ServiceTemplateVolumeGcs { return v.Gcs }).(ServiceTemplateVolumeGcsPtrOutput) @@ -8732,8 +8900,6 @@ type ServiceTemplateVolumeNfs struct { // Path that is exported by the NFS server. Path string `pulumi:"path"` // If true, mount the NFS volume as read only - // - // *** ReadOnly *bool `pulumi:"readOnly"` // Hostname or IP address of the NFS server Server string `pulumi:"server"` @@ -8754,8 +8920,6 @@ type ServiceTemplateVolumeNfsArgs struct { // Path that is exported by the NFS server. Path pulumi.StringInput `pulumi:"path"` // If true, mount the NFS volume as read only - // - // *** ReadOnly pulumi.BoolPtrInput `pulumi:"readOnly"` // Hostname or IP address of the NFS server Server pulumi.StringInput `pulumi:"server"` @@ -8844,8 +9008,6 @@ func (o ServiceTemplateVolumeNfsOutput) Path() pulumi.StringOutput { } // If true, mount the NFS volume as read only -// -// *** func (o ServiceTemplateVolumeNfsOutput) ReadOnly() pulumi.BoolPtrOutput { return o.ApplyT(func(v ServiceTemplateVolumeNfs) *bool { return v.ReadOnly }).(pulumi.BoolPtrOutput) } @@ -8890,8 +9052,6 @@ func (o ServiceTemplateVolumeNfsPtrOutput) Path() pulumi.StringPtrOutput { } // If true, mount the NFS volume as read only -// -// *** func (o ServiceTemplateVolumeNfsPtrOutput) ReadOnly() pulumi.BoolPtrOutput { return o.ApplyT(func(v *ServiceTemplateVolumeNfs) *bool { if v == nil { @@ -11529,11 +11689,11 @@ type GetJobTemplateTemplateVolume struct { CloudSqlInstances []GetJobTemplateTemplateVolumeCloudSqlInstance `pulumi:"cloudSqlInstances"` // Ephemeral storage used as a shared volume. EmptyDirs []GetJobTemplateTemplateVolumeEmptyDir `pulumi:"emptyDirs"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + // Cloud Storage bucket mounted as a volume using GCSFuse. Gcs []GetJobTemplateTemplateVolumeGc `pulumi:"gcs"` // The name of the Cloud Run v2 Job. Name string `pulumi:"name"` - // NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + // NFS share mounted as a volume. Nfs []GetJobTemplateTemplateVolumeNf `pulumi:"nfs"` // Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret Secrets []GetJobTemplateTemplateVolumeSecret `pulumi:"secrets"` @@ -11555,11 +11715,11 @@ type GetJobTemplateTemplateVolumeArgs struct { CloudSqlInstances GetJobTemplateTemplateVolumeCloudSqlInstanceArrayInput `pulumi:"cloudSqlInstances"` // Ephemeral storage used as a shared volume. EmptyDirs GetJobTemplateTemplateVolumeEmptyDirArrayInput `pulumi:"emptyDirs"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + // Cloud Storage bucket mounted as a volume using GCSFuse. Gcs GetJobTemplateTemplateVolumeGcArrayInput `pulumi:"gcs"` // The name of the Cloud Run v2 Job. Name pulumi.StringInput `pulumi:"name"` - // NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + // NFS share mounted as a volume. 
Nfs GetJobTemplateTemplateVolumeNfArrayInput `pulumi:"nfs"` // Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret Secrets GetJobTemplateTemplateVolumeSecretArrayInput `pulumi:"secrets"` @@ -11628,7 +11788,7 @@ func (o GetJobTemplateTemplateVolumeOutput) EmptyDirs() GetJobTemplateTemplateVo return o.ApplyT(func(v GetJobTemplateTemplateVolume) []GetJobTemplateTemplateVolumeEmptyDir { return v.EmptyDirs }).(GetJobTemplateTemplateVolumeEmptyDirArrayOutput) } -// Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. +// Cloud Storage bucket mounted as a volume using GCSFuse. func (o GetJobTemplateTemplateVolumeOutput) Gcs() GetJobTemplateTemplateVolumeGcArrayOutput { return o.ApplyT(func(v GetJobTemplateTemplateVolume) []GetJobTemplateTemplateVolumeGc { return v.Gcs }).(GetJobTemplateTemplateVolumeGcArrayOutput) } @@ -11638,7 +11798,7 @@ func (o GetJobTemplateTemplateVolumeOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v GetJobTemplateTemplateVolume) string { return v.Name }).(pulumi.StringOutput) } -// NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. +// NFS share mounted as a volume. func (o GetJobTemplateTemplateVolumeOutput) Nfs() GetJobTemplateTemplateVolumeNfArrayOutput { return o.ApplyT(func(v GetJobTemplateTemplateVolume) []GetJobTemplateTemplateVolumeNf { return v.Nfs }).(GetJobTemplateTemplateVolumeNfArrayOutput) } @@ -13139,6 +13299,8 @@ type GetServiceTemplate struct { Scalings []GetServiceTemplateScaling `pulumi:"scalings"` // Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. ServiceAccount string `pulumi:"serviceAccount"` + // Enables Cloud Service Mesh for this Revision. + ServiceMeshes []GetServiceTemplateServiceMesh `pulumi:"serviceMeshes"` // Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity SessionAffinity bool `pulumi:"sessionAffinity"` // Max allowed time for an instance to respond to a request. @@ -13191,6 +13353,8 @@ type GetServiceTemplateArgs struct { Scalings GetServiceTemplateScalingArrayInput `pulumi:"scalings"` // Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. ServiceAccount pulumi.StringInput `pulumi:"serviceAccount"` + // Enables Cloud Service Mesh for this Revision. + ServiceMeshes GetServiceTemplateServiceMeshArrayInput `pulumi:"serviceMeshes"` // Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity SessionAffinity pulumi.BoolInput `pulumi:"sessionAffinity"` // Max allowed time for an instance to respond to a request. @@ -13309,6 +13473,11 @@ func (o GetServiceTemplateOutput) ServiceAccount() pulumi.StringOutput { return o.ApplyT(func(v GetServiceTemplate) string { return v.ServiceAccount }).(pulumi.StringOutput) } +// Enables Cloud Service Mesh for this Revision. 
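+// This is populated from the service template's `serviceMesh` block. As an
+// illustrative, non-generated sketch of the corresponding input (using the
+// ServiceTemplateServiceMeshArgs type defined above; the mesh resource name
+// below is a placeholder):
+//
+//	Template: &cloudrunv2.ServiceTemplateArgs{
+//		ServiceMesh: &cloudrunv2.ServiceTemplateServiceMeshArgs{
+//			Mesh: pulumi.String("projects/my-project/locations/global/meshes/my-mesh"),
+//		},
+//	}
+//
+// See also the "Cloudrunv2 Service Mesh" example added to service.go in this change.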
+func (o GetServiceTemplateOutput) ServiceMeshes() GetServiceTemplateServiceMeshArrayOutput { + return o.ApplyT(func(v GetServiceTemplate) []GetServiceTemplateServiceMesh { return v.ServiceMeshes }).(GetServiceTemplateServiceMeshArrayOutput) +} + // Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity func (o GetServiceTemplateOutput) SessionAffinity() pulumi.BoolOutput { return o.ApplyT(func(v GetServiceTemplate) bool { return v.SessionAffinity }).(pulumi.BoolOutput) @@ -15512,12 +15681,109 @@ func (o GetServiceTemplateScalingArrayOutput) Index(i pulumi.IntInput) GetServic }).(GetServiceTemplateScalingOutput) } +type GetServiceTemplateServiceMesh struct { + // The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + Mesh string `pulumi:"mesh"` +} + +// GetServiceTemplateServiceMeshInput is an input type that accepts GetServiceTemplateServiceMeshArgs and GetServiceTemplateServiceMeshOutput values. +// You can construct a concrete instance of `GetServiceTemplateServiceMeshInput` via: +// +// GetServiceTemplateServiceMeshArgs{...} +type GetServiceTemplateServiceMeshInput interface { + pulumi.Input + + ToGetServiceTemplateServiceMeshOutput() GetServiceTemplateServiceMeshOutput + ToGetServiceTemplateServiceMeshOutputWithContext(context.Context) GetServiceTemplateServiceMeshOutput +} + +type GetServiceTemplateServiceMeshArgs struct { + // The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + Mesh pulumi.StringInput `pulumi:"mesh"` +} + +func (GetServiceTemplateServiceMeshArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetServiceTemplateServiceMesh)(nil)).Elem() +} + +func (i GetServiceTemplateServiceMeshArgs) ToGetServiceTemplateServiceMeshOutput() GetServiceTemplateServiceMeshOutput { + return i.ToGetServiceTemplateServiceMeshOutputWithContext(context.Background()) +} + +func (i GetServiceTemplateServiceMeshArgs) ToGetServiceTemplateServiceMeshOutputWithContext(ctx context.Context) GetServiceTemplateServiceMeshOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetServiceTemplateServiceMeshOutput) +} + +// GetServiceTemplateServiceMeshArrayInput is an input type that accepts GetServiceTemplateServiceMeshArray and GetServiceTemplateServiceMeshArrayOutput values. 
+// You can construct a concrete instance of `GetServiceTemplateServiceMeshArrayInput` via: +// +// GetServiceTemplateServiceMeshArray{ GetServiceTemplateServiceMeshArgs{...} } +type GetServiceTemplateServiceMeshArrayInput interface { + pulumi.Input + + ToGetServiceTemplateServiceMeshArrayOutput() GetServiceTemplateServiceMeshArrayOutput + ToGetServiceTemplateServiceMeshArrayOutputWithContext(context.Context) GetServiceTemplateServiceMeshArrayOutput +} + +type GetServiceTemplateServiceMeshArray []GetServiceTemplateServiceMeshInput + +func (GetServiceTemplateServiceMeshArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetServiceTemplateServiceMesh)(nil)).Elem() +} + +func (i GetServiceTemplateServiceMeshArray) ToGetServiceTemplateServiceMeshArrayOutput() GetServiceTemplateServiceMeshArrayOutput { + return i.ToGetServiceTemplateServiceMeshArrayOutputWithContext(context.Background()) +} + +func (i GetServiceTemplateServiceMeshArray) ToGetServiceTemplateServiceMeshArrayOutputWithContext(ctx context.Context) GetServiceTemplateServiceMeshArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetServiceTemplateServiceMeshArrayOutput) +} + +type GetServiceTemplateServiceMeshOutput struct{ *pulumi.OutputState } + +func (GetServiceTemplateServiceMeshOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetServiceTemplateServiceMesh)(nil)).Elem() +} + +func (o GetServiceTemplateServiceMeshOutput) ToGetServiceTemplateServiceMeshOutput() GetServiceTemplateServiceMeshOutput { + return o +} + +func (o GetServiceTemplateServiceMeshOutput) ToGetServiceTemplateServiceMeshOutputWithContext(ctx context.Context) GetServiceTemplateServiceMeshOutput { + return o +} + +// The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. +func (o GetServiceTemplateServiceMeshOutput) Mesh() pulumi.StringOutput { + return o.ApplyT(func(v GetServiceTemplateServiceMesh) string { return v.Mesh }).(pulumi.StringOutput) +} + +type GetServiceTemplateServiceMeshArrayOutput struct{ *pulumi.OutputState } + +func (GetServiceTemplateServiceMeshArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetServiceTemplateServiceMesh)(nil)).Elem() +} + +func (o GetServiceTemplateServiceMeshArrayOutput) ToGetServiceTemplateServiceMeshArrayOutput() GetServiceTemplateServiceMeshArrayOutput { + return o +} + +func (o GetServiceTemplateServiceMeshArrayOutput) ToGetServiceTemplateServiceMeshArrayOutputWithContext(ctx context.Context) GetServiceTemplateServiceMeshArrayOutput { + return o +} + +func (o GetServiceTemplateServiceMeshArrayOutput) Index(i pulumi.IntInput) GetServiceTemplateServiceMeshOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetServiceTemplateServiceMesh { + return vs[0].([]GetServiceTemplateServiceMesh)[vs[1].(int)] + }).(GetServiceTemplateServiceMeshOutput) +} + type GetServiceTemplateVolume struct { // For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. CloudSqlInstances []GetServiceTemplateVolumeCloudSqlInstance `pulumi:"cloudSqlInstances"` // Ephemeral storage used as a shared volume. EmptyDirs []GetServiceTemplateVolumeEmptyDir `pulumi:"emptyDirs"` - // Cloud Storage bucket mounted as a volume using GCSFuse. 
This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Gcs []GetServiceTemplateVolumeGc `pulumi:"gcs"` // The name of the Cloud Run v2 Service. Name string `pulumi:"name"` @@ -15543,7 +15809,7 @@ type GetServiceTemplateVolumeArgs struct { CloudSqlInstances GetServiceTemplateVolumeCloudSqlInstanceArrayInput `pulumi:"cloudSqlInstances"` // Ephemeral storage used as a shared volume. EmptyDirs GetServiceTemplateVolumeEmptyDirArrayInput `pulumi:"emptyDirs"` - // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + // Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Gcs GetServiceTemplateVolumeGcArrayInput `pulumi:"gcs"` // The name of the Cloud Run v2 Service. Name pulumi.StringInput `pulumi:"name"` @@ -15616,7 +15882,7 @@ func (o GetServiceTemplateVolumeOutput) EmptyDirs() GetServiceTemplateVolumeEmpt return o.ApplyT(func(v GetServiceTemplateVolume) []GetServiceTemplateVolumeEmptyDir { return v.EmptyDirs }).(GetServiceTemplateVolumeEmptyDirArrayOutput) } -// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. +// Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. func (o GetServiceTemplateVolumeOutput) Gcs() GetServiceTemplateVolumeGcArrayOutput { return o.ApplyT(func(v GetServiceTemplateVolume) []GetServiceTemplateVolumeGc { return v.Gcs }).(GetServiceTemplateVolumeGcArrayOutput) } @@ -17068,6 +17334,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*ServiceTemplateContainerVolumeMountArrayInput)(nil)).Elem(), ServiceTemplateContainerVolumeMountArray{}) pulumi.RegisterInputType(reflect.TypeOf((*ServiceTemplateScalingInput)(nil)).Elem(), ServiceTemplateScalingArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ServiceTemplateScalingPtrInput)(nil)).Elem(), ServiceTemplateScalingArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ServiceTemplateServiceMeshInput)(nil)).Elem(), ServiceTemplateServiceMeshArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ServiceTemplateServiceMeshPtrInput)(nil)).Elem(), ServiceTemplateServiceMeshArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ServiceTemplateVolumeInput)(nil)).Elem(), ServiceTemplateVolumeArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ServiceTemplateVolumeArrayInput)(nil)).Elem(), ServiceTemplateVolumeArray{}) pulumi.RegisterInputType(reflect.TypeOf((*ServiceTemplateVolumeCloudSqlInstanceInput)(nil)).Elem(), ServiceTemplateVolumeCloudSqlInstanceArgs{}) @@ -17180,6 +17448,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetServiceTemplateContainerVolumeMountArrayInput)(nil)).Elem(), GetServiceTemplateContainerVolumeMountArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetServiceTemplateScalingInput)(nil)).Elem(), GetServiceTemplateScalingArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetServiceTemplateScalingArrayInput)(nil)).Elem(), GetServiceTemplateScalingArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetServiceTemplateServiceMeshInput)(nil)).Elem(), GetServiceTemplateServiceMeshArgs{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*GetServiceTemplateServiceMeshArrayInput)(nil)).Elem(), GetServiceTemplateServiceMeshArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetServiceTemplateVolumeInput)(nil)).Elem(), GetServiceTemplateVolumeArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetServiceTemplateVolumeArrayInput)(nil)).Elem(), GetServiceTemplateVolumeArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetServiceTemplateVolumeCloudSqlInstanceInput)(nil)).Elem(), GetServiceTemplateVolumeCloudSqlInstanceArgs{}) @@ -17300,6 +17570,8 @@ func init() { pulumi.RegisterOutputType(ServiceTemplateContainerVolumeMountArrayOutput{}) pulumi.RegisterOutputType(ServiceTemplateScalingOutput{}) pulumi.RegisterOutputType(ServiceTemplateScalingPtrOutput{}) + pulumi.RegisterOutputType(ServiceTemplateServiceMeshOutput{}) + pulumi.RegisterOutputType(ServiceTemplateServiceMeshPtrOutput{}) pulumi.RegisterOutputType(ServiceTemplateVolumeOutput{}) pulumi.RegisterOutputType(ServiceTemplateVolumeArrayOutput{}) pulumi.RegisterOutputType(ServiceTemplateVolumeCloudSqlInstanceOutput{}) @@ -17412,6 +17684,8 @@ func init() { pulumi.RegisterOutputType(GetServiceTemplateContainerVolumeMountArrayOutput{}) pulumi.RegisterOutputType(GetServiceTemplateScalingOutput{}) pulumi.RegisterOutputType(GetServiceTemplateScalingArrayOutput{}) + pulumi.RegisterOutputType(GetServiceTemplateServiceMeshOutput{}) + pulumi.RegisterOutputType(GetServiceTemplateServiceMeshArrayOutput{}) pulumi.RegisterOutputType(GetServiceTemplateVolumeOutput{}) pulumi.RegisterOutputType(GetServiceTemplateVolumeArrayOutput{}) pulumi.RegisterOutputType(GetServiceTemplateVolumeCloudSqlInstanceOutput{}) diff --git a/sdk/go/gcp/cloudrunv2/service.go b/sdk/go/gcp/cloudrunv2/service.go index fab48ba935..dce4b2ea77 100644 --- a/sdk/go/gcp/cloudrunv2/service.go +++ b/sdk/go/gcp/cloudrunv2/service.go @@ -578,7 +578,6 @@ import ( // Name: pulumi.String("cloudrun-service"), // Location: pulumi.String("us-central1"), // DeletionProtection: pulumi.Bool(false), -// LaunchStage: pulumi.String("BETA"), // Template: &cloudrunv2.ServiceTemplateArgs{ // ExecutionEnvironment: pulumi.String("EXECUTION_ENVIRONMENT_GEN2"), // Containers: cloudrunv2.ServiceTemplateContainerArray{ @@ -651,7 +650,6 @@ import ( // Location: pulumi.String("us-central1"), // DeletionProtection: pulumi.Bool(false), // Ingress: pulumi.String("INGRESS_TRAFFIC_ALL"), -// LaunchStage: pulumi.String("BETA"), // Template: &cloudrunv2.ServiceTemplateArgs{ // ExecutionEnvironment: pulumi.String("EXECUTION_ENVIRONMENT_GEN2"), // Containers: cloudrunv2.ServiceTemplateContainerArray{ @@ -695,6 +693,62 @@ import ( // } // // ``` +// ### Cloudrunv2 Service Mesh +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudrunv2" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/networkservices" +// "github.com/pulumi/pulumi-time/sdk/go/time" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// mesh, err := networkservices.NewMesh(ctx, "mesh", &networkservices.MeshArgs{ +// Name: pulumi.String("network-services-mesh"), +// }) +// if err != nil { +// return err +// } +// waitForMesh, err := time.NewSleep(ctx, "wait_for_mesh", &time.SleepArgs{ +// CreateDuration: "1m", +// }, pulumi.DependsOn([]pulumi.Resource{ +// mesh, +// })) +// if err != nil { +// return err +// } +// _, err = cloudrunv2.NewService(ctx, "default", &cloudrunv2.ServiceArgs{ +// Name: pulumi.String("cloudrun-service"), +// 
DeletionProtection: pulumi.Bool(false), +// Location: pulumi.String("us-central1"), +// LaunchStage: pulumi.String("BETA"), +// Template: &cloudrunv2.ServiceTemplateArgs{ +// Containers: cloudrunv2.ServiceTemplateContainerArray{ +// &cloudrunv2.ServiceTemplateContainerArgs{ +// Image: pulumi.String("us-docker.pkg.dev/cloudrun/container/hello"), +// }, +// }, +// ServiceMesh: &cloudrunv2.ServiceTemplateServiceMeshArgs{ +// Mesh: mesh.ID(), +// }, +// }, +// }, pulumi.DependsOn([]pulumi.Resource{ +// waitForMesh, +// })) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // // ## Import // diff --git a/sdk/go/gcp/cloudtasks/pulumiTypes.go b/sdk/go/gcp/cloudtasks/pulumiTypes.go index 6a70bf6e44..5740d503e4 100644 --- a/sdk/go/gcp/cloudtasks/pulumiTypes.go +++ b/sdk/go/gcp/cloudtasks/pulumiTypes.go @@ -223,6 +223,1358 @@ func (o QueueAppEngineRoutingOverridePtrOutput) Version() pulumi.StringPtrOutput }).(pulumi.StringPtrOutput) } +type QueueHttpTarget struct { + // HTTP target headers. + // This map contains the header field names and values. + // Headers will be set when running the CreateTask and/or BufferTask. + // These headers represent a subset of the headers that will be configured for the task's HTTP request. + // Some HTTP request headers will be ignored or replaced. + // Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + // The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + // Structure is documented below. + HeaderOverrides []QueueHttpTargetHeaderOverride `pulumi:"headerOverrides"` + // The HTTP method to use for the request. + // When specified, it overrides HttpRequest for the task. + // Note that if the value is set to GET the body of the task will be ignored at execution time. + // Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + HttpMethod *string `pulumi:"httpMethod"` + // If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + // This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + // Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + // Structure is documented below. + OauthToken *QueueHttpTargetOauthToken `pulumi:"oauthToken"` + // If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + // This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + // Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + // Structure is documented below. + OidcToken *QueueHttpTargetOidcToken `pulumi:"oidcToken"` + // URI override. + // When specified, overrides the execution URI for all the tasks in the queue. + // Structure is documented below. + UriOverride *QueueHttpTargetUriOverride `pulumi:"uriOverride"` +} + +// QueueHttpTargetInput is an input type that accepts QueueHttpTargetArgs and QueueHttpTargetOutput values. 
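+//
+// As an editorial, illustrative sketch only (not generated code): wiring a queue-level
+// HTTP target onto a Cloud Tasks queue might look as follows, assuming the Queue
+// resource's QueueArgs exposes a matching `HttpTarget QueueHttpTargetPtrInput` field in
+// this release (queue name, location, and header values below are placeholders):
+//
+//	_, err := cloudtasks.NewQueue(ctx, "default", &cloudtasks.QueueArgs{
+//		Name:     pulumi.String("cloud-tasks-queue"),
+//		Location: pulumi.String("us-central1"),
+//		// Assumed field added in this release; see QueueHttpTargetArgs below.
+//		HttpTarget: &cloudtasks.QueueHttpTargetArgs{
+//			HttpMethod: pulumi.String("POST"),
+//			HeaderOverrides: cloudtasks.QueueHttpTargetHeaderOverrideArray{
+//				&cloudtasks.QueueHttpTargetHeaderOverrideArgs{
+//					Header: &cloudtasks.QueueHttpTargetHeaderOverrideHeaderArgs{
+//						Key:   pulumi.String("X-Example-Header"),
+//						Value: pulumi.String("example"),
+//					},
+//				},
+//			},
+//		},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//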
+// You can construct a concrete instance of `QueueHttpTargetInput` via: +// +// QueueHttpTargetArgs{...} +type QueueHttpTargetInput interface { + pulumi.Input + + ToQueueHttpTargetOutput() QueueHttpTargetOutput + ToQueueHttpTargetOutputWithContext(context.Context) QueueHttpTargetOutput +} + +type QueueHttpTargetArgs struct { + // HTTP target headers. + // This map contains the header field names and values. + // Headers will be set when running the CreateTask and/or BufferTask. + // These headers represent a subset of the headers that will be configured for the task's HTTP request. + // Some HTTP request headers will be ignored or replaced. + // Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + // The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + // Structure is documented below. + HeaderOverrides QueueHttpTargetHeaderOverrideArrayInput `pulumi:"headerOverrides"` + // The HTTP method to use for the request. + // When specified, it overrides HttpRequest for the task. + // Note that if the value is set to GET the body of the task will be ignored at execution time. + // Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + HttpMethod pulumi.StringPtrInput `pulumi:"httpMethod"` + // If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + // This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + // Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + // Structure is documented below. + OauthToken QueueHttpTargetOauthTokenPtrInput `pulumi:"oauthToken"` + // If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + // This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + // Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + // Structure is documented below. + OidcToken QueueHttpTargetOidcTokenPtrInput `pulumi:"oidcToken"` + // URI override. + // When specified, overrides the execution URI for all the tasks in the queue. + // Structure is documented below. + UriOverride QueueHttpTargetUriOverridePtrInput `pulumi:"uriOverride"` +} + +func (QueueHttpTargetArgs) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTarget)(nil)).Elem() +} + +func (i QueueHttpTargetArgs) ToQueueHttpTargetOutput() QueueHttpTargetOutput { + return i.ToQueueHttpTargetOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetArgs) ToQueueHttpTargetOutputWithContext(ctx context.Context) QueueHttpTargetOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetOutput) +} + +func (i QueueHttpTargetArgs) ToQueueHttpTargetPtrOutput() QueueHttpTargetPtrOutput { + return i.ToQueueHttpTargetPtrOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetArgs) ToQueueHttpTargetPtrOutputWithContext(ctx context.Context) QueueHttpTargetPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetOutput).ToQueueHttpTargetPtrOutputWithContext(ctx) +} + +// QueueHttpTargetPtrInput is an input type that accepts QueueHttpTargetArgs, QueueHttpTargetPtr and QueueHttpTargetPtrOutput values. 
+// You can construct a concrete instance of `QueueHttpTargetPtrInput` via: +// +// QueueHttpTargetArgs{...} +// +// or: +// +// nil +type QueueHttpTargetPtrInput interface { + pulumi.Input + + ToQueueHttpTargetPtrOutput() QueueHttpTargetPtrOutput + ToQueueHttpTargetPtrOutputWithContext(context.Context) QueueHttpTargetPtrOutput +} + +type queueHttpTargetPtrType QueueHttpTargetArgs + +func QueueHttpTargetPtr(v *QueueHttpTargetArgs) QueueHttpTargetPtrInput { + return (*queueHttpTargetPtrType)(v) +} + +func (*queueHttpTargetPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTarget)(nil)).Elem() +} + +func (i *queueHttpTargetPtrType) ToQueueHttpTargetPtrOutput() QueueHttpTargetPtrOutput { + return i.ToQueueHttpTargetPtrOutputWithContext(context.Background()) +} + +func (i *queueHttpTargetPtrType) ToQueueHttpTargetPtrOutputWithContext(ctx context.Context) QueueHttpTargetPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetPtrOutput) +} + +type QueueHttpTargetOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetOutput) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTarget)(nil)).Elem() +} + +func (o QueueHttpTargetOutput) ToQueueHttpTargetOutput() QueueHttpTargetOutput { + return o +} + +func (o QueueHttpTargetOutput) ToQueueHttpTargetOutputWithContext(ctx context.Context) QueueHttpTargetOutput { + return o +} + +func (o QueueHttpTargetOutput) ToQueueHttpTargetPtrOutput() QueueHttpTargetPtrOutput { + return o.ToQueueHttpTargetPtrOutputWithContext(context.Background()) +} + +func (o QueueHttpTargetOutput) ToQueueHttpTargetPtrOutputWithContext(ctx context.Context) QueueHttpTargetPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v QueueHttpTarget) *QueueHttpTarget { + return &v + }).(QueueHttpTargetPtrOutput) +} + +// HTTP target headers. +// This map contains the header field names and values. +// Headers will be set when running the CreateTask and/or BufferTask. +// These headers represent a subset of the headers that will be configured for the task's HTTP request. +// Some HTTP request headers will be ignored or replaced. +// Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. +// The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. +// Structure is documented below. +func (o QueueHttpTargetOutput) HeaderOverrides() QueueHttpTargetHeaderOverrideArrayOutput { + return o.ApplyT(func(v QueueHttpTarget) []QueueHttpTargetHeaderOverride { return v.HeaderOverrides }).(QueueHttpTargetHeaderOverrideArrayOutput) +} + +// The HTTP method to use for the request. +// When specified, it overrides HttpRequest for the task. +// Note that if the value is set to GET the body of the task will be ignored at execution time. +// Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. +func (o QueueHttpTargetOutput) HttpMethod() pulumi.StringPtrOutput { + return o.ApplyT(func(v QueueHttpTarget) *string { return v.HttpMethod }).(pulumi.StringPtrOutput) +} + +// If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. +// This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. +// Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. +// Structure is documented below. 
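+//
+// As an illustrative sketch (not generated code; the service account email is a
+// placeholder), an OAuth token for targets hosted on *.googleapis.com could be
+// declared with the QueueHttpTargetOauthTokenArgs type defined below:
+//
+//	OauthToken: &cloudtasks.QueueHttpTargetOauthTokenArgs{
+//		ServiceAccountEmail: pulumi.String("tasks-caller@my-project.iam.gserviceaccount.com"),
+//		Scope:               pulumi.String("https://www.googleapis.com/auth/cloud-platform"),
+//	},
+//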
+func (o QueueHttpTargetOutput) OauthToken() QueueHttpTargetOauthTokenPtrOutput { + return o.ApplyT(func(v QueueHttpTarget) *QueueHttpTargetOauthToken { return v.OauthToken }).(QueueHttpTargetOauthTokenPtrOutput) +} + +// If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. +// This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. +// Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. +// Structure is documented below. +func (o QueueHttpTargetOutput) OidcToken() QueueHttpTargetOidcTokenPtrOutput { + return o.ApplyT(func(v QueueHttpTarget) *QueueHttpTargetOidcToken { return v.OidcToken }).(QueueHttpTargetOidcTokenPtrOutput) +} + +// URI override. +// When specified, overrides the execution URI for all the tasks in the queue. +// Structure is documented below. +func (o QueueHttpTargetOutput) UriOverride() QueueHttpTargetUriOverridePtrOutput { + return o.ApplyT(func(v QueueHttpTarget) *QueueHttpTargetUriOverride { return v.UriOverride }).(QueueHttpTargetUriOverridePtrOutput) +} + +type QueueHttpTargetPtrOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTarget)(nil)).Elem() +} + +func (o QueueHttpTargetPtrOutput) ToQueueHttpTargetPtrOutput() QueueHttpTargetPtrOutput { + return o +} + +func (o QueueHttpTargetPtrOutput) ToQueueHttpTargetPtrOutputWithContext(ctx context.Context) QueueHttpTargetPtrOutput { + return o +} + +func (o QueueHttpTargetPtrOutput) Elem() QueueHttpTargetOutput { + return o.ApplyT(func(v *QueueHttpTarget) QueueHttpTarget { + if v != nil { + return *v + } + var ret QueueHttpTarget + return ret + }).(QueueHttpTargetOutput) +} + +// HTTP target headers. +// This map contains the header field names and values. +// Headers will be set when running the CreateTask and/or BufferTask. +// These headers represent a subset of the headers that will be configured for the task's HTTP request. +// Some HTTP request headers will be ignored or replaced. +// Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. +// The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. +// Structure is documented below. +func (o QueueHttpTargetPtrOutput) HeaderOverrides() QueueHttpTargetHeaderOverrideArrayOutput { + return o.ApplyT(func(v *QueueHttpTarget) []QueueHttpTargetHeaderOverride { + if v == nil { + return nil + } + return v.HeaderOverrides + }).(QueueHttpTargetHeaderOverrideArrayOutput) +} + +// The HTTP method to use for the request. +// When specified, it overrides HttpRequest for the task. +// Note that if the value is set to GET the body of the task will be ignored at execution time. +// Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. +func (o QueueHttpTargetPtrOutput) HttpMethod() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTarget) *string { + if v == nil { + return nil + } + return v.HttpMethod + }).(pulumi.StringPtrOutput) +} + +// If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. +// This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. 
+// Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. +// Structure is documented below. +func (o QueueHttpTargetPtrOutput) OauthToken() QueueHttpTargetOauthTokenPtrOutput { + return o.ApplyT(func(v *QueueHttpTarget) *QueueHttpTargetOauthToken { + if v == nil { + return nil + } + return v.OauthToken + }).(QueueHttpTargetOauthTokenPtrOutput) +} + +// If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. +// This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. +// Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. +// Structure is documented below. +func (o QueueHttpTargetPtrOutput) OidcToken() QueueHttpTargetOidcTokenPtrOutput { + return o.ApplyT(func(v *QueueHttpTarget) *QueueHttpTargetOidcToken { + if v == nil { + return nil + } + return v.OidcToken + }).(QueueHttpTargetOidcTokenPtrOutput) +} + +// URI override. +// When specified, overrides the execution URI for all the tasks in the queue. +// Structure is documented below. +func (o QueueHttpTargetPtrOutput) UriOverride() QueueHttpTargetUriOverridePtrOutput { + return o.ApplyT(func(v *QueueHttpTarget) *QueueHttpTargetUriOverride { + if v == nil { + return nil + } + return v.UriOverride + }).(QueueHttpTargetUriOverridePtrOutput) +} + +type QueueHttpTargetHeaderOverride struct { + // Header embodying a key and a value. + // Structure is documented below. + Header QueueHttpTargetHeaderOverrideHeader `pulumi:"header"` +} + +// QueueHttpTargetHeaderOverrideInput is an input type that accepts QueueHttpTargetHeaderOverrideArgs and QueueHttpTargetHeaderOverrideOutput values. +// You can construct a concrete instance of `QueueHttpTargetHeaderOverrideInput` via: +// +// QueueHttpTargetHeaderOverrideArgs{...} +type QueueHttpTargetHeaderOverrideInput interface { + pulumi.Input + + ToQueueHttpTargetHeaderOverrideOutput() QueueHttpTargetHeaderOverrideOutput + ToQueueHttpTargetHeaderOverrideOutputWithContext(context.Context) QueueHttpTargetHeaderOverrideOutput +} + +type QueueHttpTargetHeaderOverrideArgs struct { + // Header embodying a key and a value. + // Structure is documented below. + Header QueueHttpTargetHeaderOverrideHeaderInput `pulumi:"header"` +} + +func (QueueHttpTargetHeaderOverrideArgs) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetHeaderOverride)(nil)).Elem() +} + +func (i QueueHttpTargetHeaderOverrideArgs) ToQueueHttpTargetHeaderOverrideOutput() QueueHttpTargetHeaderOverrideOutput { + return i.ToQueueHttpTargetHeaderOverrideOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetHeaderOverrideArgs) ToQueueHttpTargetHeaderOverrideOutputWithContext(ctx context.Context) QueueHttpTargetHeaderOverrideOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetHeaderOverrideOutput) +} + +// QueueHttpTargetHeaderOverrideArrayInput is an input type that accepts QueueHttpTargetHeaderOverrideArray and QueueHttpTargetHeaderOverrideArrayOutput values. 
+// You can construct a concrete instance of `QueueHttpTargetHeaderOverrideArrayInput` via: +// +// QueueHttpTargetHeaderOverrideArray{ QueueHttpTargetHeaderOverrideArgs{...} } +type QueueHttpTargetHeaderOverrideArrayInput interface { + pulumi.Input + + ToQueueHttpTargetHeaderOverrideArrayOutput() QueueHttpTargetHeaderOverrideArrayOutput + ToQueueHttpTargetHeaderOverrideArrayOutputWithContext(context.Context) QueueHttpTargetHeaderOverrideArrayOutput +} + +type QueueHttpTargetHeaderOverrideArray []QueueHttpTargetHeaderOverrideInput + +func (QueueHttpTargetHeaderOverrideArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]QueueHttpTargetHeaderOverride)(nil)).Elem() +} + +func (i QueueHttpTargetHeaderOverrideArray) ToQueueHttpTargetHeaderOverrideArrayOutput() QueueHttpTargetHeaderOverrideArrayOutput { + return i.ToQueueHttpTargetHeaderOverrideArrayOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetHeaderOverrideArray) ToQueueHttpTargetHeaderOverrideArrayOutputWithContext(ctx context.Context) QueueHttpTargetHeaderOverrideArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetHeaderOverrideArrayOutput) +} + +type QueueHttpTargetHeaderOverrideOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetHeaderOverrideOutput) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetHeaderOverride)(nil)).Elem() +} + +func (o QueueHttpTargetHeaderOverrideOutput) ToQueueHttpTargetHeaderOverrideOutput() QueueHttpTargetHeaderOverrideOutput { + return o +} + +func (o QueueHttpTargetHeaderOverrideOutput) ToQueueHttpTargetHeaderOverrideOutputWithContext(ctx context.Context) QueueHttpTargetHeaderOverrideOutput { + return o +} + +// Header embodying a key and a value. +// Structure is documented below. +func (o QueueHttpTargetHeaderOverrideOutput) Header() QueueHttpTargetHeaderOverrideHeaderOutput { + return o.ApplyT(func(v QueueHttpTargetHeaderOverride) QueueHttpTargetHeaderOverrideHeader { return v.Header }).(QueueHttpTargetHeaderOverrideHeaderOutput) +} + +type QueueHttpTargetHeaderOverrideArrayOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetHeaderOverrideArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]QueueHttpTargetHeaderOverride)(nil)).Elem() +} + +func (o QueueHttpTargetHeaderOverrideArrayOutput) ToQueueHttpTargetHeaderOverrideArrayOutput() QueueHttpTargetHeaderOverrideArrayOutput { + return o +} + +func (o QueueHttpTargetHeaderOverrideArrayOutput) ToQueueHttpTargetHeaderOverrideArrayOutputWithContext(ctx context.Context) QueueHttpTargetHeaderOverrideArrayOutput { + return o +} + +func (o QueueHttpTargetHeaderOverrideArrayOutput) Index(i pulumi.IntInput) QueueHttpTargetHeaderOverrideOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) QueueHttpTargetHeaderOverride { + return vs[0].([]QueueHttpTargetHeaderOverride)[vs[1].(int)] + }).(QueueHttpTargetHeaderOverrideOutput) +} + +type QueueHttpTargetHeaderOverrideHeader struct { + // The Key of the header. + Key string `pulumi:"key"` + // The Value of the header. + Value string `pulumi:"value"` +} + +// QueueHttpTargetHeaderOverrideHeaderInput is an input type that accepts QueueHttpTargetHeaderOverrideHeaderArgs and QueueHttpTargetHeaderOverrideHeaderOutput values. 
+// You can construct a concrete instance of `QueueHttpTargetHeaderOverrideHeaderInput` via: +// +// QueueHttpTargetHeaderOverrideHeaderArgs{...} +type QueueHttpTargetHeaderOverrideHeaderInput interface { + pulumi.Input + + ToQueueHttpTargetHeaderOverrideHeaderOutput() QueueHttpTargetHeaderOverrideHeaderOutput + ToQueueHttpTargetHeaderOverrideHeaderOutputWithContext(context.Context) QueueHttpTargetHeaderOverrideHeaderOutput +} + +type QueueHttpTargetHeaderOverrideHeaderArgs struct { + // The Key of the header. + Key pulumi.StringInput `pulumi:"key"` + // The Value of the header. + Value pulumi.StringInput `pulumi:"value"` +} + +func (QueueHttpTargetHeaderOverrideHeaderArgs) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetHeaderOverrideHeader)(nil)).Elem() +} + +func (i QueueHttpTargetHeaderOverrideHeaderArgs) ToQueueHttpTargetHeaderOverrideHeaderOutput() QueueHttpTargetHeaderOverrideHeaderOutput { + return i.ToQueueHttpTargetHeaderOverrideHeaderOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetHeaderOverrideHeaderArgs) ToQueueHttpTargetHeaderOverrideHeaderOutputWithContext(ctx context.Context) QueueHttpTargetHeaderOverrideHeaderOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetHeaderOverrideHeaderOutput) +} + +type QueueHttpTargetHeaderOverrideHeaderOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetHeaderOverrideHeaderOutput) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetHeaderOverrideHeader)(nil)).Elem() +} + +func (o QueueHttpTargetHeaderOverrideHeaderOutput) ToQueueHttpTargetHeaderOverrideHeaderOutput() QueueHttpTargetHeaderOverrideHeaderOutput { + return o +} + +func (o QueueHttpTargetHeaderOverrideHeaderOutput) ToQueueHttpTargetHeaderOverrideHeaderOutputWithContext(ctx context.Context) QueueHttpTargetHeaderOverrideHeaderOutput { + return o +} + +// The Key of the header. +func (o QueueHttpTargetHeaderOverrideHeaderOutput) Key() pulumi.StringOutput { + return o.ApplyT(func(v QueueHttpTargetHeaderOverrideHeader) string { return v.Key }).(pulumi.StringOutput) +} + +// The Value of the header. +func (o QueueHttpTargetHeaderOverrideHeaderOutput) Value() pulumi.StringOutput { + return o.ApplyT(func(v QueueHttpTargetHeaderOverrideHeader) string { return v.Value }).(pulumi.StringOutput) +} + +type QueueHttpTargetOauthToken struct { + // OAuth scope to be used for generating OAuth access token. + // If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + Scope *string `pulumi:"scope"` + // Service account email to be used for generating OAuth token. + // The service account must be within the same project as the queue. + // The caller must have iam.serviceAccounts.actAs permission for the service account. + ServiceAccountEmail string `pulumi:"serviceAccountEmail"` +} + +// QueueHttpTargetOauthTokenInput is an input type that accepts QueueHttpTargetOauthTokenArgs and QueueHttpTargetOauthTokenOutput values. +// You can construct a concrete instance of `QueueHttpTargetOauthTokenInput` via: +// +// QueueHttpTargetOauthTokenArgs{...} +type QueueHttpTargetOauthTokenInput interface { + pulumi.Input + + ToQueueHttpTargetOauthTokenOutput() QueueHttpTargetOauthTokenOutput + ToQueueHttpTargetOauthTokenOutputWithContext(context.Context) QueueHttpTargetOauthTokenOutput +} + +type QueueHttpTargetOauthTokenArgs struct { + // OAuth scope to be used for generating OAuth access token. + // If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. 
+ Scope pulumi.StringPtrInput `pulumi:"scope"` + // Service account email to be used for generating OAuth token. + // The service account must be within the same project as the queue. + // The caller must have iam.serviceAccounts.actAs permission for the service account. + ServiceAccountEmail pulumi.StringInput `pulumi:"serviceAccountEmail"` +} + +func (QueueHttpTargetOauthTokenArgs) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetOauthToken)(nil)).Elem() +} + +func (i QueueHttpTargetOauthTokenArgs) ToQueueHttpTargetOauthTokenOutput() QueueHttpTargetOauthTokenOutput { + return i.ToQueueHttpTargetOauthTokenOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetOauthTokenArgs) ToQueueHttpTargetOauthTokenOutputWithContext(ctx context.Context) QueueHttpTargetOauthTokenOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetOauthTokenOutput) +} + +func (i QueueHttpTargetOauthTokenArgs) ToQueueHttpTargetOauthTokenPtrOutput() QueueHttpTargetOauthTokenPtrOutput { + return i.ToQueueHttpTargetOauthTokenPtrOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetOauthTokenArgs) ToQueueHttpTargetOauthTokenPtrOutputWithContext(ctx context.Context) QueueHttpTargetOauthTokenPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetOauthTokenOutput).ToQueueHttpTargetOauthTokenPtrOutputWithContext(ctx) +} + +// QueueHttpTargetOauthTokenPtrInput is an input type that accepts QueueHttpTargetOauthTokenArgs, QueueHttpTargetOauthTokenPtr and QueueHttpTargetOauthTokenPtrOutput values. +// You can construct a concrete instance of `QueueHttpTargetOauthTokenPtrInput` via: +// +// QueueHttpTargetOauthTokenArgs{...} +// +// or: +// +// nil +type QueueHttpTargetOauthTokenPtrInput interface { + pulumi.Input + + ToQueueHttpTargetOauthTokenPtrOutput() QueueHttpTargetOauthTokenPtrOutput + ToQueueHttpTargetOauthTokenPtrOutputWithContext(context.Context) QueueHttpTargetOauthTokenPtrOutput +} + +type queueHttpTargetOauthTokenPtrType QueueHttpTargetOauthTokenArgs + +func QueueHttpTargetOauthTokenPtr(v *QueueHttpTargetOauthTokenArgs) QueueHttpTargetOauthTokenPtrInput { + return (*queueHttpTargetOauthTokenPtrType)(v) +} + +func (*queueHttpTargetOauthTokenPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetOauthToken)(nil)).Elem() +} + +func (i *queueHttpTargetOauthTokenPtrType) ToQueueHttpTargetOauthTokenPtrOutput() QueueHttpTargetOauthTokenPtrOutput { + return i.ToQueueHttpTargetOauthTokenPtrOutputWithContext(context.Background()) +} + +func (i *queueHttpTargetOauthTokenPtrType) ToQueueHttpTargetOauthTokenPtrOutputWithContext(ctx context.Context) QueueHttpTargetOauthTokenPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetOauthTokenPtrOutput) +} + +type QueueHttpTargetOauthTokenOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetOauthTokenOutput) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetOauthToken)(nil)).Elem() +} + +func (o QueueHttpTargetOauthTokenOutput) ToQueueHttpTargetOauthTokenOutput() QueueHttpTargetOauthTokenOutput { + return o +} + +func (o QueueHttpTargetOauthTokenOutput) ToQueueHttpTargetOauthTokenOutputWithContext(ctx context.Context) QueueHttpTargetOauthTokenOutput { + return o +} + +func (o QueueHttpTargetOauthTokenOutput) ToQueueHttpTargetOauthTokenPtrOutput() QueueHttpTargetOauthTokenPtrOutput { + return o.ToQueueHttpTargetOauthTokenPtrOutputWithContext(context.Background()) +} + +func (o QueueHttpTargetOauthTokenOutput) 
ToQueueHttpTargetOauthTokenPtrOutputWithContext(ctx context.Context) QueueHttpTargetOauthTokenPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v QueueHttpTargetOauthToken) *QueueHttpTargetOauthToken { + return &v + }).(QueueHttpTargetOauthTokenPtrOutput) +} + +// OAuth scope to be used for generating OAuth access token. +// If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. +func (o QueueHttpTargetOauthTokenOutput) Scope() pulumi.StringPtrOutput { + return o.ApplyT(func(v QueueHttpTargetOauthToken) *string { return v.Scope }).(pulumi.StringPtrOutput) +} + +// Service account email to be used for generating OAuth token. +// The service account must be within the same project as the queue. +// The caller must have iam.serviceAccounts.actAs permission for the service account. +func (o QueueHttpTargetOauthTokenOutput) ServiceAccountEmail() pulumi.StringOutput { + return o.ApplyT(func(v QueueHttpTargetOauthToken) string { return v.ServiceAccountEmail }).(pulumi.StringOutput) +} + +type QueueHttpTargetOauthTokenPtrOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetOauthTokenPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetOauthToken)(nil)).Elem() +} + +func (o QueueHttpTargetOauthTokenPtrOutput) ToQueueHttpTargetOauthTokenPtrOutput() QueueHttpTargetOauthTokenPtrOutput { + return o +} + +func (o QueueHttpTargetOauthTokenPtrOutput) ToQueueHttpTargetOauthTokenPtrOutputWithContext(ctx context.Context) QueueHttpTargetOauthTokenPtrOutput { + return o +} + +func (o QueueHttpTargetOauthTokenPtrOutput) Elem() QueueHttpTargetOauthTokenOutput { + return o.ApplyT(func(v *QueueHttpTargetOauthToken) QueueHttpTargetOauthToken { + if v != nil { + return *v + } + var ret QueueHttpTargetOauthToken + return ret + }).(QueueHttpTargetOauthTokenOutput) +} + +// OAuth scope to be used for generating OAuth access token. +// If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. +func (o QueueHttpTargetOauthTokenPtrOutput) Scope() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetOauthToken) *string { + if v == nil { + return nil + } + return v.Scope + }).(pulumi.StringPtrOutput) +} + +// Service account email to be used for generating OAuth token. +// The service account must be within the same project as the queue. +// The caller must have iam.serviceAccounts.actAs permission for the service account. +func (o QueueHttpTargetOauthTokenPtrOutput) ServiceAccountEmail() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetOauthToken) *string { + if v == nil { + return nil + } + return &v.ServiceAccountEmail + }).(pulumi.StringPtrOutput) +} + +type QueueHttpTargetOidcToken struct { + // Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + Audience *string `pulumi:"audience"` + // Service account email to be used for generating OIDC token. + // The service account must be within the same project as the queue. + // The caller must have iam.serviceAccounts.actAs permission for the service account. + ServiceAccountEmail string `pulumi:"serviceAccountEmail"` +} + +// QueueHttpTargetOidcTokenInput is an input type that accepts QueueHttpTargetOidcTokenArgs and QueueHttpTargetOidcTokenOutput values. 
+// You can construct a concrete instance of `QueueHttpTargetOidcTokenInput` via: +// +// QueueHttpTargetOidcTokenArgs{...} +type QueueHttpTargetOidcTokenInput interface { + pulumi.Input + + ToQueueHttpTargetOidcTokenOutput() QueueHttpTargetOidcTokenOutput + ToQueueHttpTargetOidcTokenOutputWithContext(context.Context) QueueHttpTargetOidcTokenOutput +} + +type QueueHttpTargetOidcTokenArgs struct { + // Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + Audience pulumi.StringPtrInput `pulumi:"audience"` + // Service account email to be used for generating OIDC token. + // The service account must be within the same project as the queue. + // The caller must have iam.serviceAccounts.actAs permission for the service account. + ServiceAccountEmail pulumi.StringInput `pulumi:"serviceAccountEmail"` +} + +func (QueueHttpTargetOidcTokenArgs) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetOidcToken)(nil)).Elem() +} + +func (i QueueHttpTargetOidcTokenArgs) ToQueueHttpTargetOidcTokenOutput() QueueHttpTargetOidcTokenOutput { + return i.ToQueueHttpTargetOidcTokenOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetOidcTokenArgs) ToQueueHttpTargetOidcTokenOutputWithContext(ctx context.Context) QueueHttpTargetOidcTokenOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetOidcTokenOutput) +} + +func (i QueueHttpTargetOidcTokenArgs) ToQueueHttpTargetOidcTokenPtrOutput() QueueHttpTargetOidcTokenPtrOutput { + return i.ToQueueHttpTargetOidcTokenPtrOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetOidcTokenArgs) ToQueueHttpTargetOidcTokenPtrOutputWithContext(ctx context.Context) QueueHttpTargetOidcTokenPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetOidcTokenOutput).ToQueueHttpTargetOidcTokenPtrOutputWithContext(ctx) +} + +// QueueHttpTargetOidcTokenPtrInput is an input type that accepts QueueHttpTargetOidcTokenArgs, QueueHttpTargetOidcTokenPtr and QueueHttpTargetOidcTokenPtrOutput values. 
+// You can construct a concrete instance of `QueueHttpTargetOidcTokenPtrInput` via: +// +// QueueHttpTargetOidcTokenArgs{...} +// +// or: +// +// nil +type QueueHttpTargetOidcTokenPtrInput interface { + pulumi.Input + + ToQueueHttpTargetOidcTokenPtrOutput() QueueHttpTargetOidcTokenPtrOutput + ToQueueHttpTargetOidcTokenPtrOutputWithContext(context.Context) QueueHttpTargetOidcTokenPtrOutput +} + +type queueHttpTargetOidcTokenPtrType QueueHttpTargetOidcTokenArgs + +func QueueHttpTargetOidcTokenPtr(v *QueueHttpTargetOidcTokenArgs) QueueHttpTargetOidcTokenPtrInput { + return (*queueHttpTargetOidcTokenPtrType)(v) +} + +func (*queueHttpTargetOidcTokenPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetOidcToken)(nil)).Elem() +} + +func (i *queueHttpTargetOidcTokenPtrType) ToQueueHttpTargetOidcTokenPtrOutput() QueueHttpTargetOidcTokenPtrOutput { + return i.ToQueueHttpTargetOidcTokenPtrOutputWithContext(context.Background()) +} + +func (i *queueHttpTargetOidcTokenPtrType) ToQueueHttpTargetOidcTokenPtrOutputWithContext(ctx context.Context) QueueHttpTargetOidcTokenPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetOidcTokenPtrOutput) +} + +type QueueHttpTargetOidcTokenOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetOidcTokenOutput) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetOidcToken)(nil)).Elem() +} + +func (o QueueHttpTargetOidcTokenOutput) ToQueueHttpTargetOidcTokenOutput() QueueHttpTargetOidcTokenOutput { + return o +} + +func (o QueueHttpTargetOidcTokenOutput) ToQueueHttpTargetOidcTokenOutputWithContext(ctx context.Context) QueueHttpTargetOidcTokenOutput { + return o +} + +func (o QueueHttpTargetOidcTokenOutput) ToQueueHttpTargetOidcTokenPtrOutput() QueueHttpTargetOidcTokenPtrOutput { + return o.ToQueueHttpTargetOidcTokenPtrOutputWithContext(context.Background()) +} + +func (o QueueHttpTargetOidcTokenOutput) ToQueueHttpTargetOidcTokenPtrOutputWithContext(ctx context.Context) QueueHttpTargetOidcTokenPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v QueueHttpTargetOidcToken) *QueueHttpTargetOidcToken { + return &v + }).(QueueHttpTargetOidcTokenPtrOutput) +} + +// Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. +func (o QueueHttpTargetOidcTokenOutput) Audience() pulumi.StringPtrOutput { + return o.ApplyT(func(v QueueHttpTargetOidcToken) *string { return v.Audience }).(pulumi.StringPtrOutput) +} + +// Service account email to be used for generating OIDC token. +// The service account must be within the same project as the queue. +// The caller must have iam.serviceAccounts.actAs permission for the service account. 
+func (o QueueHttpTargetOidcTokenOutput) ServiceAccountEmail() pulumi.StringOutput { + return o.ApplyT(func(v QueueHttpTargetOidcToken) string { return v.ServiceAccountEmail }).(pulumi.StringOutput) +} + +type QueueHttpTargetOidcTokenPtrOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetOidcTokenPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetOidcToken)(nil)).Elem() +} + +func (o QueueHttpTargetOidcTokenPtrOutput) ToQueueHttpTargetOidcTokenPtrOutput() QueueHttpTargetOidcTokenPtrOutput { + return o +} + +func (o QueueHttpTargetOidcTokenPtrOutput) ToQueueHttpTargetOidcTokenPtrOutputWithContext(ctx context.Context) QueueHttpTargetOidcTokenPtrOutput { + return o +} + +func (o QueueHttpTargetOidcTokenPtrOutput) Elem() QueueHttpTargetOidcTokenOutput { + return o.ApplyT(func(v *QueueHttpTargetOidcToken) QueueHttpTargetOidcToken { + if v != nil { + return *v + } + var ret QueueHttpTargetOidcToken + return ret + }).(QueueHttpTargetOidcTokenOutput) +} + +// Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. +func (o QueueHttpTargetOidcTokenPtrOutput) Audience() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetOidcToken) *string { + if v == nil { + return nil + } + return v.Audience + }).(pulumi.StringPtrOutput) +} + +// Service account email to be used for generating OIDC token. +// The service account must be within the same project as the queue. +// The caller must have iam.serviceAccounts.actAs permission for the service account. +func (o QueueHttpTargetOidcTokenPtrOutput) ServiceAccountEmail() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetOidcToken) *string { + if v == nil { + return nil + } + return &v.ServiceAccountEmail + }).(pulumi.StringPtrOutput) +} + +type QueueHttpTargetUriOverride struct { + // Host override. + // When specified, replaces the host part of the task URL. + // For example, if the task URL is "https://www.google.com", and host value + // is set to "example.net", the overridden URI will be changed to "https://example.net". + // Host value cannot be an empty string (INVALID_ARGUMENT). + Host *string `pulumi:"host"` + // URI path. + // When specified, replaces the existing path of the task URL. + // Setting the path value to an empty string clears the URI path segment. + // Structure is documented below. + PathOverride *QueueHttpTargetUriOverridePathOverride `pulumi:"pathOverride"` + // Port override. + // When specified, replaces the port part of the task URI. + // For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + // Note that the port value must be a positive integer. + // Setting the port to 0 (Zero) clears the URI port. + Port *string `pulumi:"port"` + // URI query. + // When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + // Structure is documented below. + QueryOverride *QueueHttpTargetUriOverrideQueryOverride `pulumi:"queryOverride"` + // Scheme override. + // When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + // Possible values are: `HTTP`, `HTTPS`. + Scheme *string `pulumi:"scheme"` + // URI Override Enforce Mode + // When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + // Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. 
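+	// Per the Cloud Tasks API, ALWAYS applies the queue-level override to every task,
+	// while IF_NOT_EXISTS applies it only where the task does not already specify the
+	// corresponding URI component.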
+ UriOverrideEnforceMode *string `pulumi:"uriOverrideEnforceMode"` +} + +// QueueHttpTargetUriOverrideInput is an input type that accepts QueueHttpTargetUriOverrideArgs and QueueHttpTargetUriOverrideOutput values. +// You can construct a concrete instance of `QueueHttpTargetUriOverrideInput` via: +// +// QueueHttpTargetUriOverrideArgs{...} +type QueueHttpTargetUriOverrideInput interface { + pulumi.Input + + ToQueueHttpTargetUriOverrideOutput() QueueHttpTargetUriOverrideOutput + ToQueueHttpTargetUriOverrideOutputWithContext(context.Context) QueueHttpTargetUriOverrideOutput +} + +type QueueHttpTargetUriOverrideArgs struct { + // Host override. + // When specified, replaces the host part of the task URL. + // For example, if the task URL is "https://www.google.com", and host value + // is set to "example.net", the overridden URI will be changed to "https://example.net". + // Host value cannot be an empty string (INVALID_ARGUMENT). + Host pulumi.StringPtrInput `pulumi:"host"` + // URI path. + // When specified, replaces the existing path of the task URL. + // Setting the path value to an empty string clears the URI path segment. + // Structure is documented below. + PathOverride QueueHttpTargetUriOverridePathOverridePtrInput `pulumi:"pathOverride"` + // Port override. + // When specified, replaces the port part of the task URI. + // For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + // Note that the port value must be a positive integer. + // Setting the port to 0 (Zero) clears the URI port. + Port pulumi.StringPtrInput `pulumi:"port"` + // URI query. + // When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + // Structure is documented below. + QueryOverride QueueHttpTargetUriOverrideQueryOverridePtrInput `pulumi:"queryOverride"` + // Scheme override. + // When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + // Possible values are: `HTTP`, `HTTPS`. + Scheme pulumi.StringPtrInput `pulumi:"scheme"` + // URI Override Enforce Mode + // When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + // Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. 
+ UriOverrideEnforceMode pulumi.StringPtrInput `pulumi:"uriOverrideEnforceMode"` +} + +func (QueueHttpTargetUriOverrideArgs) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetUriOverride)(nil)).Elem() +} + +func (i QueueHttpTargetUriOverrideArgs) ToQueueHttpTargetUriOverrideOutput() QueueHttpTargetUriOverrideOutput { + return i.ToQueueHttpTargetUriOverrideOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetUriOverrideArgs) ToQueueHttpTargetUriOverrideOutputWithContext(ctx context.Context) QueueHttpTargetUriOverrideOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetUriOverrideOutput) +} + +func (i QueueHttpTargetUriOverrideArgs) ToQueueHttpTargetUriOverridePtrOutput() QueueHttpTargetUriOverridePtrOutput { + return i.ToQueueHttpTargetUriOverridePtrOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetUriOverrideArgs) ToQueueHttpTargetUriOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetUriOverrideOutput).ToQueueHttpTargetUriOverridePtrOutputWithContext(ctx) +} + +// QueueHttpTargetUriOverridePtrInput is an input type that accepts QueueHttpTargetUriOverrideArgs, QueueHttpTargetUriOverridePtr and QueueHttpTargetUriOverridePtrOutput values. +// You can construct a concrete instance of `QueueHttpTargetUriOverridePtrInput` via: +// +// QueueHttpTargetUriOverrideArgs{...} +// +// or: +// +// nil +type QueueHttpTargetUriOverridePtrInput interface { + pulumi.Input + + ToQueueHttpTargetUriOverridePtrOutput() QueueHttpTargetUriOverridePtrOutput + ToQueueHttpTargetUriOverridePtrOutputWithContext(context.Context) QueueHttpTargetUriOverridePtrOutput +} + +type queueHttpTargetUriOverridePtrType QueueHttpTargetUriOverrideArgs + +func QueueHttpTargetUriOverridePtr(v *QueueHttpTargetUriOverrideArgs) QueueHttpTargetUriOverridePtrInput { + return (*queueHttpTargetUriOverridePtrType)(v) +} + +func (*queueHttpTargetUriOverridePtrType) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetUriOverride)(nil)).Elem() +} + +func (i *queueHttpTargetUriOverridePtrType) ToQueueHttpTargetUriOverridePtrOutput() QueueHttpTargetUriOverridePtrOutput { + return i.ToQueueHttpTargetUriOverridePtrOutputWithContext(context.Background()) +} + +func (i *queueHttpTargetUriOverridePtrType) ToQueueHttpTargetUriOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetUriOverridePtrOutput) +} + +type QueueHttpTargetUriOverrideOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetUriOverrideOutput) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetUriOverride)(nil)).Elem() +} + +func (o QueueHttpTargetUriOverrideOutput) ToQueueHttpTargetUriOverrideOutput() QueueHttpTargetUriOverrideOutput { + return o +} + +func (o QueueHttpTargetUriOverrideOutput) ToQueueHttpTargetUriOverrideOutputWithContext(ctx context.Context) QueueHttpTargetUriOverrideOutput { + return o +} + +func (o QueueHttpTargetUriOverrideOutput) ToQueueHttpTargetUriOverridePtrOutput() QueueHttpTargetUriOverridePtrOutput { + return o.ToQueueHttpTargetUriOverridePtrOutputWithContext(context.Background()) +} + +func (o QueueHttpTargetUriOverrideOutput) ToQueueHttpTargetUriOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v QueueHttpTargetUriOverride) *QueueHttpTargetUriOverride { + return 
&v + }).(QueueHttpTargetUriOverridePtrOutput) +} + +// Host override. +// When specified, replaces the host part of the task URL. +// For example, if the task URL is "https://www.google.com", and host value +// is set to "example.net", the overridden URI will be changed to "https://example.net". +// Host value cannot be an empty string (INVALID_ARGUMENT). +func (o QueueHttpTargetUriOverrideOutput) Host() pulumi.StringPtrOutput { + return o.ApplyT(func(v QueueHttpTargetUriOverride) *string { return v.Host }).(pulumi.StringPtrOutput) +} + +// URI path. +// When specified, replaces the existing path of the task URL. +// Setting the path value to an empty string clears the URI path segment. +// Structure is documented below. +func (o QueueHttpTargetUriOverrideOutput) PathOverride() QueueHttpTargetUriOverridePathOverridePtrOutput { + return o.ApplyT(func(v QueueHttpTargetUriOverride) *QueueHttpTargetUriOverridePathOverride { return v.PathOverride }).(QueueHttpTargetUriOverridePathOverridePtrOutput) +} + +// Port override. +// When specified, replaces the port part of the task URI. +// For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. +// Note that the port value must be a positive integer. +// Setting the port to 0 (Zero) clears the URI port. +func (o QueueHttpTargetUriOverrideOutput) Port() pulumi.StringPtrOutput { + return o.ApplyT(func(v QueueHttpTargetUriOverride) *string { return v.Port }).(pulumi.StringPtrOutput) +} + +// URI query. +// When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. +// Structure is documented below. +func (o QueueHttpTargetUriOverrideOutput) QueryOverride() QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return o.ApplyT(func(v QueueHttpTargetUriOverride) *QueueHttpTargetUriOverrideQueryOverride { return v.QueryOverride }).(QueueHttpTargetUriOverrideQueryOverridePtrOutput) +} + +// Scheme override. +// When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). +// Possible values are: `HTTP`, `HTTPS`. +func (o QueueHttpTargetUriOverrideOutput) Scheme() pulumi.StringPtrOutput { + return o.ApplyT(func(v QueueHttpTargetUriOverride) *string { return v.Scheme }).(pulumi.StringPtrOutput) +} + +// URI Override Enforce Mode +// When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. +// Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. 
+func (o QueueHttpTargetUriOverrideOutput) UriOverrideEnforceMode() pulumi.StringPtrOutput { + return o.ApplyT(func(v QueueHttpTargetUriOverride) *string { return v.UriOverrideEnforceMode }).(pulumi.StringPtrOutput) +} + +type QueueHttpTargetUriOverridePtrOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetUriOverridePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetUriOverride)(nil)).Elem() +} + +func (o QueueHttpTargetUriOverridePtrOutput) ToQueueHttpTargetUriOverridePtrOutput() QueueHttpTargetUriOverridePtrOutput { + return o +} + +func (o QueueHttpTargetUriOverridePtrOutput) ToQueueHttpTargetUriOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePtrOutput { + return o +} + +func (o QueueHttpTargetUriOverridePtrOutput) Elem() QueueHttpTargetUriOverrideOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverride) QueueHttpTargetUriOverride { + if v != nil { + return *v + } + var ret QueueHttpTargetUriOverride + return ret + }).(QueueHttpTargetUriOverrideOutput) +} + +// Host override. +// When specified, replaces the host part of the task URL. +// For example, if the task URL is "https://www.google.com", and host value +// is set to "example.net", the overridden URI will be changed to "https://example.net". +// Host value cannot be an empty string (INVALID_ARGUMENT). +func (o QueueHttpTargetUriOverridePtrOutput) Host() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverride) *string { + if v == nil { + return nil + } + return v.Host + }).(pulumi.StringPtrOutput) +} + +// URI path. +// When specified, replaces the existing path of the task URL. +// Setting the path value to an empty string clears the URI path segment. +// Structure is documented below. +func (o QueueHttpTargetUriOverridePtrOutput) PathOverride() QueueHttpTargetUriOverridePathOverridePtrOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverride) *QueueHttpTargetUriOverridePathOverride { + if v == nil { + return nil + } + return v.PathOverride + }).(QueueHttpTargetUriOverridePathOverridePtrOutput) +} + +// Port override. +// When specified, replaces the port part of the task URI. +// For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. +// Note that the port value must be a positive integer. +// Setting the port to 0 (Zero) clears the URI port. +func (o QueueHttpTargetUriOverridePtrOutput) Port() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverride) *string { + if v == nil { + return nil + } + return v.Port + }).(pulumi.StringPtrOutput) +} + +// URI query. +// When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. +// Structure is documented below. +func (o QueueHttpTargetUriOverridePtrOutput) QueryOverride() QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverride) *QueueHttpTargetUriOverrideQueryOverride { + if v == nil { + return nil + } + return v.QueryOverride + }).(QueueHttpTargetUriOverrideQueryOverridePtrOutput) +} + +// Scheme override. +// When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). +// Possible values are: `HTTP`, `HTTPS`. 
+func (o QueueHttpTargetUriOverridePtrOutput) Scheme() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverride) *string { + if v == nil { + return nil + } + return v.Scheme + }).(pulumi.StringPtrOutput) +} + +// URI Override Enforce Mode +// When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. +// Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. +func (o QueueHttpTargetUriOverridePtrOutput) UriOverrideEnforceMode() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverride) *string { + if v == nil { + return nil + } + return v.UriOverrideEnforceMode + }).(pulumi.StringPtrOutput) +} + +type QueueHttpTargetUriOverridePathOverride struct { + // The URI path (e.g., /users/1234). Default is an empty string. + Path *string `pulumi:"path"` +} + +// QueueHttpTargetUriOverridePathOverrideInput is an input type that accepts QueueHttpTargetUriOverridePathOverrideArgs and QueueHttpTargetUriOverridePathOverrideOutput values. +// You can construct a concrete instance of `QueueHttpTargetUriOverridePathOverrideInput` via: +// +// QueueHttpTargetUriOverridePathOverrideArgs{...} +type QueueHttpTargetUriOverridePathOverrideInput interface { + pulumi.Input + + ToQueueHttpTargetUriOverridePathOverrideOutput() QueueHttpTargetUriOverridePathOverrideOutput + ToQueueHttpTargetUriOverridePathOverrideOutputWithContext(context.Context) QueueHttpTargetUriOverridePathOverrideOutput +} + +type QueueHttpTargetUriOverridePathOverrideArgs struct { + // The URI path (e.g., /users/1234). Default is an empty string. + Path pulumi.StringPtrInput `pulumi:"path"` +} + +func (QueueHttpTargetUriOverridePathOverrideArgs) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetUriOverridePathOverride)(nil)).Elem() +} + +func (i QueueHttpTargetUriOverridePathOverrideArgs) ToQueueHttpTargetUriOverridePathOverrideOutput() QueueHttpTargetUriOverridePathOverrideOutput { + return i.ToQueueHttpTargetUriOverridePathOverrideOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetUriOverridePathOverrideArgs) ToQueueHttpTargetUriOverridePathOverrideOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePathOverrideOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetUriOverridePathOverrideOutput) +} + +func (i QueueHttpTargetUriOverridePathOverrideArgs) ToQueueHttpTargetUriOverridePathOverridePtrOutput() QueueHttpTargetUriOverridePathOverridePtrOutput { + return i.ToQueueHttpTargetUriOverridePathOverridePtrOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetUriOverridePathOverrideArgs) ToQueueHttpTargetUriOverridePathOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePathOverridePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetUriOverridePathOverrideOutput).ToQueueHttpTargetUriOverridePathOverridePtrOutputWithContext(ctx) +} + +// QueueHttpTargetUriOverridePathOverridePtrInput is an input type that accepts QueueHttpTargetUriOverridePathOverrideArgs, QueueHttpTargetUriOverridePathOverridePtr and QueueHttpTargetUriOverridePathOverridePtrOutput values. 
+// You can construct a concrete instance of `QueueHttpTargetUriOverridePathOverridePtrInput` via: +// +// QueueHttpTargetUriOverridePathOverrideArgs{...} +// +// or: +// +// nil +type QueueHttpTargetUriOverridePathOverridePtrInput interface { + pulumi.Input + + ToQueueHttpTargetUriOverridePathOverridePtrOutput() QueueHttpTargetUriOverridePathOverridePtrOutput + ToQueueHttpTargetUriOverridePathOverridePtrOutputWithContext(context.Context) QueueHttpTargetUriOverridePathOverridePtrOutput +} + +type queueHttpTargetUriOverridePathOverridePtrType QueueHttpTargetUriOverridePathOverrideArgs + +func QueueHttpTargetUriOverridePathOverridePtr(v *QueueHttpTargetUriOverridePathOverrideArgs) QueueHttpTargetUriOverridePathOverridePtrInput { + return (*queueHttpTargetUriOverridePathOverridePtrType)(v) +} + +func (*queueHttpTargetUriOverridePathOverridePtrType) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetUriOverridePathOverride)(nil)).Elem() +} + +func (i *queueHttpTargetUriOverridePathOverridePtrType) ToQueueHttpTargetUriOverridePathOverridePtrOutput() QueueHttpTargetUriOverridePathOverridePtrOutput { + return i.ToQueueHttpTargetUriOverridePathOverridePtrOutputWithContext(context.Background()) +} + +func (i *queueHttpTargetUriOverridePathOverridePtrType) ToQueueHttpTargetUriOverridePathOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePathOverridePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetUriOverridePathOverridePtrOutput) +} + +type QueueHttpTargetUriOverridePathOverrideOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetUriOverridePathOverrideOutput) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetUriOverridePathOverride)(nil)).Elem() +} + +func (o QueueHttpTargetUriOverridePathOverrideOutput) ToQueueHttpTargetUriOverridePathOverrideOutput() QueueHttpTargetUriOverridePathOverrideOutput { + return o +} + +func (o QueueHttpTargetUriOverridePathOverrideOutput) ToQueueHttpTargetUriOverridePathOverrideOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePathOverrideOutput { + return o +} + +func (o QueueHttpTargetUriOverridePathOverrideOutput) ToQueueHttpTargetUriOverridePathOverridePtrOutput() QueueHttpTargetUriOverridePathOverridePtrOutput { + return o.ToQueueHttpTargetUriOverridePathOverridePtrOutputWithContext(context.Background()) +} + +func (o QueueHttpTargetUriOverridePathOverrideOutput) ToQueueHttpTargetUriOverridePathOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePathOverridePtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v QueueHttpTargetUriOverridePathOverride) *QueueHttpTargetUriOverridePathOverride { + return &v + }).(QueueHttpTargetUriOverridePathOverridePtrOutput) +} + +// The URI path (e.g., /users/1234). Default is an empty string. 
+func (o QueueHttpTargetUriOverridePathOverrideOutput) Path() pulumi.StringPtrOutput { + return o.ApplyT(func(v QueueHttpTargetUriOverridePathOverride) *string { return v.Path }).(pulumi.StringPtrOutput) +} + +type QueueHttpTargetUriOverridePathOverridePtrOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetUriOverridePathOverridePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetUriOverridePathOverride)(nil)).Elem() +} + +func (o QueueHttpTargetUriOverridePathOverridePtrOutput) ToQueueHttpTargetUriOverridePathOverridePtrOutput() QueueHttpTargetUriOverridePathOverridePtrOutput { + return o +} + +func (o QueueHttpTargetUriOverridePathOverridePtrOutput) ToQueueHttpTargetUriOverridePathOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverridePathOverridePtrOutput { + return o +} + +func (o QueueHttpTargetUriOverridePathOverridePtrOutput) Elem() QueueHttpTargetUriOverridePathOverrideOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverridePathOverride) QueueHttpTargetUriOverridePathOverride { + if v != nil { + return *v + } + var ret QueueHttpTargetUriOverridePathOverride + return ret + }).(QueueHttpTargetUriOverridePathOverrideOutput) +} + +// The URI path (e.g., /users/1234). Default is an empty string. +func (o QueueHttpTargetUriOverridePathOverridePtrOutput) Path() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverridePathOverride) *string { + if v == nil { + return nil + } + return v.Path + }).(pulumi.StringPtrOutput) +} + +type QueueHttpTargetUriOverrideQueryOverride struct { + // The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + QueryParams *string `pulumi:"queryParams"` +} + +// QueueHttpTargetUriOverrideQueryOverrideInput is an input type that accepts QueueHttpTargetUriOverrideQueryOverrideArgs and QueueHttpTargetUriOverrideQueryOverrideOutput values. +// You can construct a concrete instance of `QueueHttpTargetUriOverrideQueryOverrideInput` via: +// +// QueueHttpTargetUriOverrideQueryOverrideArgs{...} +type QueueHttpTargetUriOverrideQueryOverrideInput interface { + pulumi.Input + + ToQueueHttpTargetUriOverrideQueryOverrideOutput() QueueHttpTargetUriOverrideQueryOverrideOutput + ToQueueHttpTargetUriOverrideQueryOverrideOutputWithContext(context.Context) QueueHttpTargetUriOverrideQueryOverrideOutput +} + +type QueueHttpTargetUriOverrideQueryOverrideArgs struct { + // The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. 
+ QueryParams pulumi.StringPtrInput `pulumi:"queryParams"` +} + +func (QueueHttpTargetUriOverrideQueryOverrideArgs) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetUriOverrideQueryOverride)(nil)).Elem() +} + +func (i QueueHttpTargetUriOverrideQueryOverrideArgs) ToQueueHttpTargetUriOverrideQueryOverrideOutput() QueueHttpTargetUriOverrideQueryOverrideOutput { + return i.ToQueueHttpTargetUriOverrideQueryOverrideOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetUriOverrideQueryOverrideArgs) ToQueueHttpTargetUriOverrideQueryOverrideOutputWithContext(ctx context.Context) QueueHttpTargetUriOverrideQueryOverrideOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetUriOverrideQueryOverrideOutput) +} + +func (i QueueHttpTargetUriOverrideQueryOverrideArgs) ToQueueHttpTargetUriOverrideQueryOverridePtrOutput() QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return i.ToQueueHttpTargetUriOverrideQueryOverridePtrOutputWithContext(context.Background()) +} + +func (i QueueHttpTargetUriOverrideQueryOverrideArgs) ToQueueHttpTargetUriOverrideQueryOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetUriOverrideQueryOverrideOutput).ToQueueHttpTargetUriOverrideQueryOverridePtrOutputWithContext(ctx) +} + +// QueueHttpTargetUriOverrideQueryOverridePtrInput is an input type that accepts QueueHttpTargetUriOverrideQueryOverrideArgs, QueueHttpTargetUriOverrideQueryOverridePtr and QueueHttpTargetUriOverrideQueryOverridePtrOutput values. +// You can construct a concrete instance of `QueueHttpTargetUriOverrideQueryOverridePtrInput` via: +// +// QueueHttpTargetUriOverrideQueryOverrideArgs{...} +// +// or: +// +// nil +type QueueHttpTargetUriOverrideQueryOverridePtrInput interface { + pulumi.Input + + ToQueueHttpTargetUriOverrideQueryOverridePtrOutput() QueueHttpTargetUriOverrideQueryOverridePtrOutput + ToQueueHttpTargetUriOverrideQueryOverridePtrOutputWithContext(context.Context) QueueHttpTargetUriOverrideQueryOverridePtrOutput +} + +type queueHttpTargetUriOverrideQueryOverridePtrType QueueHttpTargetUriOverrideQueryOverrideArgs + +func QueueHttpTargetUriOverrideQueryOverridePtr(v *QueueHttpTargetUriOverrideQueryOverrideArgs) QueueHttpTargetUriOverrideQueryOverridePtrInput { + return (*queueHttpTargetUriOverrideQueryOverridePtrType)(v) +} + +func (*queueHttpTargetUriOverrideQueryOverridePtrType) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetUriOverrideQueryOverride)(nil)).Elem() +} + +func (i *queueHttpTargetUriOverrideQueryOverridePtrType) ToQueueHttpTargetUriOverrideQueryOverridePtrOutput() QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return i.ToQueueHttpTargetUriOverrideQueryOverridePtrOutputWithContext(context.Background()) +} + +func (i *queueHttpTargetUriOverrideQueryOverridePtrType) ToQueueHttpTargetUriOverrideQueryOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(QueueHttpTargetUriOverrideQueryOverridePtrOutput) +} + +type QueueHttpTargetUriOverrideQueryOverrideOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetUriOverrideQueryOverrideOutput) ElementType() reflect.Type { + return reflect.TypeOf((*QueueHttpTargetUriOverrideQueryOverride)(nil)).Elem() +} + +func (o QueueHttpTargetUriOverrideQueryOverrideOutput) ToQueueHttpTargetUriOverrideQueryOverrideOutput() QueueHttpTargetUriOverrideQueryOverrideOutput { + return 
o +} + +func (o QueueHttpTargetUriOverrideQueryOverrideOutput) ToQueueHttpTargetUriOverrideQueryOverrideOutputWithContext(ctx context.Context) QueueHttpTargetUriOverrideQueryOverrideOutput { + return o +} + +func (o QueueHttpTargetUriOverrideQueryOverrideOutput) ToQueueHttpTargetUriOverrideQueryOverridePtrOutput() QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return o.ToQueueHttpTargetUriOverrideQueryOverridePtrOutputWithContext(context.Background()) +} + +func (o QueueHttpTargetUriOverrideQueryOverrideOutput) ToQueueHttpTargetUriOverrideQueryOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v QueueHttpTargetUriOverrideQueryOverride) *QueueHttpTargetUriOverrideQueryOverride { + return &v + }).(QueueHttpTargetUriOverrideQueryOverridePtrOutput) +} + +// The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. +func (o QueueHttpTargetUriOverrideQueryOverrideOutput) QueryParams() pulumi.StringPtrOutput { + return o.ApplyT(func(v QueueHttpTargetUriOverrideQueryOverride) *string { return v.QueryParams }).(pulumi.StringPtrOutput) +} + +type QueueHttpTargetUriOverrideQueryOverridePtrOutput struct{ *pulumi.OutputState } + +func (QueueHttpTargetUriOverrideQueryOverridePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**QueueHttpTargetUriOverrideQueryOverride)(nil)).Elem() +} + +func (o QueueHttpTargetUriOverrideQueryOverridePtrOutput) ToQueueHttpTargetUriOverrideQueryOverridePtrOutput() QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return o +} + +func (o QueueHttpTargetUriOverrideQueryOverridePtrOutput) ToQueueHttpTargetUriOverrideQueryOverridePtrOutputWithContext(ctx context.Context) QueueHttpTargetUriOverrideQueryOverridePtrOutput { + return o +} + +func (o QueueHttpTargetUriOverrideQueryOverridePtrOutput) Elem() QueueHttpTargetUriOverrideQueryOverrideOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverrideQueryOverride) QueueHttpTargetUriOverrideQueryOverride { + if v != nil { + return *v + } + var ret QueueHttpTargetUriOverrideQueryOverride + return ret + }).(QueueHttpTargetUriOverrideQueryOverrideOutput) +} + +// The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. 
+func (o QueueHttpTargetUriOverrideQueryOverridePtrOutput) QueryParams() pulumi.StringPtrOutput { + return o.ApplyT(func(v *QueueHttpTargetUriOverrideQueryOverride) *string { + if v == nil { + return nil + } + return v.QueryParams + }).(pulumi.StringPtrOutput) +} + type QueueIamBindingCondition struct { Description *string `pulumi:"description"` Expression string `pulumi:"expression"` @@ -1193,6 +2545,21 @@ func (o QueueStackdriverLoggingConfigPtrOutput) SamplingRatio() pulumi.Float64Pt func init() { pulumi.RegisterInputType(reflect.TypeOf((*QueueAppEngineRoutingOverrideInput)(nil)).Elem(), QueueAppEngineRoutingOverrideArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*QueueAppEngineRoutingOverridePtrInput)(nil)).Elem(), QueueAppEngineRoutingOverrideArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetInput)(nil)).Elem(), QueueHttpTargetArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetPtrInput)(nil)).Elem(), QueueHttpTargetArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetHeaderOverrideInput)(nil)).Elem(), QueueHttpTargetHeaderOverrideArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetHeaderOverrideArrayInput)(nil)).Elem(), QueueHttpTargetHeaderOverrideArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetHeaderOverrideHeaderInput)(nil)).Elem(), QueueHttpTargetHeaderOverrideHeaderArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetOauthTokenInput)(nil)).Elem(), QueueHttpTargetOauthTokenArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetOauthTokenPtrInput)(nil)).Elem(), QueueHttpTargetOauthTokenArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetOidcTokenInput)(nil)).Elem(), QueueHttpTargetOidcTokenArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetOidcTokenPtrInput)(nil)).Elem(), QueueHttpTargetOidcTokenArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetUriOverrideInput)(nil)).Elem(), QueueHttpTargetUriOverrideArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetUriOverridePtrInput)(nil)).Elem(), QueueHttpTargetUriOverrideArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetUriOverridePathOverrideInput)(nil)).Elem(), QueueHttpTargetUriOverridePathOverrideArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetUriOverridePathOverridePtrInput)(nil)).Elem(), QueueHttpTargetUriOverridePathOverrideArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetUriOverrideQueryOverrideInput)(nil)).Elem(), QueueHttpTargetUriOverrideQueryOverrideArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*QueueHttpTargetUriOverrideQueryOverridePtrInput)(nil)).Elem(), QueueHttpTargetUriOverrideQueryOverrideArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*QueueIamBindingConditionInput)(nil)).Elem(), QueueIamBindingConditionArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*QueueIamBindingConditionPtrInput)(nil)).Elem(), QueueIamBindingConditionArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*QueueIamMemberConditionInput)(nil)).Elem(), QueueIamMemberConditionArgs{}) @@ -1205,6 +2572,21 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*QueueStackdriverLoggingConfigPtrInput)(nil)).Elem(), QueueStackdriverLoggingConfigArgs{}) pulumi.RegisterOutputType(QueueAppEngineRoutingOverrideOutput{}) pulumi.RegisterOutputType(QueueAppEngineRoutingOverridePtrOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetPtrOutput{}) + 
pulumi.RegisterOutputType(QueueHttpTargetHeaderOverrideOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetHeaderOverrideArrayOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetHeaderOverrideHeaderOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetOauthTokenOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetOauthTokenPtrOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetOidcTokenOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetOidcTokenPtrOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetUriOverrideOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetUriOverridePtrOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetUriOverridePathOverrideOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetUriOverridePathOverridePtrOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetUriOverrideQueryOverrideOutput{}) + pulumi.RegisterOutputType(QueueHttpTargetUriOverrideQueryOverridePtrOutput{}) pulumi.RegisterOutputType(QueueIamBindingConditionOutput{}) pulumi.RegisterOutputType(QueueIamBindingConditionPtrOutput{}) pulumi.RegisterOutputType(QueueIamMemberConditionOutput{}) diff --git a/sdk/go/gcp/cloudtasks/queue.go b/sdk/go/gcp/cloudtasks/queue.go index 5d032f521a..127a9a1cae 100644 --- a/sdk/go/gcp/cloudtasks/queue.go +++ b/sdk/go/gcp/cloudtasks/queue.go @@ -87,6 +87,140 @@ import ( // } // // ``` +// ### Cloud Tasks Queue Http Target Oidc +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudtasks" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// oidcServiceAccount, err := serviceaccount.NewAccount(ctx, "oidc_service_account", &serviceaccount.AccountArgs{ +// AccountId: pulumi.String("example-oidc"), +// DisplayName: pulumi.String("Tasks Queue OIDC Service Account"), +// }) +// if err != nil { +// return err +// } +// _, err = cloudtasks.NewQueue(ctx, "http_target_oidc", &cloudtasks.QueueArgs{ +// Name: pulumi.String("cloud-tasks-queue-http-target-oidc"), +// Location: pulumi.String("us-central1"), +// HttpTarget: &cloudtasks.QueueHttpTargetArgs{ +// HttpMethod: pulumi.String("POST"), +// UriOverride: &cloudtasks.QueueHttpTargetUriOverrideArgs{ +// Scheme: pulumi.String("HTTPS"), +// Host: pulumi.String("oidc.example.com"), +// Port: pulumi.String("8443"), +// PathOverride: &cloudtasks.QueueHttpTargetUriOverridePathOverrideArgs{ +// Path: pulumi.String("/users/1234"), +// }, +// QueryOverride: &cloudtasks.QueueHttpTargetUriOverrideQueryOverrideArgs{ +// QueryParams: pulumi.String("qparam1=123&qparam2=456"), +// }, +// UriOverrideEnforceMode: pulumi.String("IF_NOT_EXISTS"), +// }, +// HeaderOverrides: cloudtasks.QueueHttpTargetHeaderOverrideArray{ +// &cloudtasks.QueueHttpTargetHeaderOverrideArgs{ +// Header: &cloudtasks.QueueHttpTargetHeaderOverrideHeaderArgs{ +// Key: pulumi.String("AddSomethingElse"), +// Value: pulumi.String("MyOtherValue"), +// }, +// }, +// &cloudtasks.QueueHttpTargetHeaderOverrideArgs{ +// Header: &cloudtasks.QueueHttpTargetHeaderOverrideHeaderArgs{ +// Key: pulumi.String("AddMe"), +// Value: pulumi.String("MyValue"), +// }, +// }, +// }, +// OidcToken: &cloudtasks.QueueHttpTargetOidcTokenArgs{ +// ServiceAccountEmail: oidcServiceAccount.Email, +// Audience: pulumi.String("https://oidc.example.com"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// ### Cloud Tasks Queue Http Target Oauth +// +// ```go +// 
package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/cloudtasks" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// oauthServiceAccount, err := serviceaccount.NewAccount(ctx, "oauth_service_account", &serviceaccount.AccountArgs{ +// AccountId: pulumi.String("example-oauth"), +// DisplayName: pulumi.String("Tasks Queue OAuth Service Account"), +// }) +// if err != nil { +// return err +// } +// _, err = cloudtasks.NewQueue(ctx, "http_target_oauth", &cloudtasks.QueueArgs{ +// Name: pulumi.String("cloud-tasks-queue-http-target-oauth"), +// Location: pulumi.String("us-central1"), +// HttpTarget: &cloudtasks.QueueHttpTargetArgs{ +// HttpMethod: pulumi.String("POST"), +// UriOverride: &cloudtasks.QueueHttpTargetUriOverrideArgs{ +// Scheme: pulumi.String("HTTPS"), +// Host: pulumi.String("oauth.example.com"), +// Port: pulumi.String("8443"), +// PathOverride: &cloudtasks.QueueHttpTargetUriOverridePathOverrideArgs{ +// Path: pulumi.String("/users/1234"), +// }, +// QueryOverride: &cloudtasks.QueueHttpTargetUriOverrideQueryOverrideArgs{ +// QueryParams: pulumi.String("qparam1=123&qparam2=456"), +// }, +// UriOverrideEnforceMode: pulumi.String("IF_NOT_EXISTS"), +// }, +// HeaderOverrides: cloudtasks.QueueHttpTargetHeaderOverrideArray{ +// &cloudtasks.QueueHttpTargetHeaderOverrideArgs{ +// Header: &cloudtasks.QueueHttpTargetHeaderOverrideHeaderArgs{ +// Key: pulumi.String("AddSomethingElse"), +// Value: pulumi.String("MyOtherValue"), +// }, +// }, +// &cloudtasks.QueueHttpTargetHeaderOverrideArgs{ +// Header: &cloudtasks.QueueHttpTargetHeaderOverrideHeaderArgs{ +// Key: pulumi.String("AddMe"), +// Value: pulumi.String("MyValue"), +// }, +// }, +// }, +// OauthToken: &cloudtasks.QueueHttpTargetOauthTokenArgs{ +// ServiceAccountEmail: oauthServiceAccount.Email, +// Scope: pulumi.String("openid https://www.googleapis.com/auth/userinfo.email"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // // ## Import // @@ -118,6 +252,9 @@ type Queue struct { // to App Engine tasks in this queue // Structure is documented below. AppEngineRoutingOverride QueueAppEngineRoutingOverridePtrOutput `pulumi:"appEngineRoutingOverride"` + // Modifies HTTP target for HTTP tasks. + // Structure is documented below. + HttpTarget QueueHttpTargetPtrOutput `pulumi:"httpTarget"` // The location of the queue // // *** @@ -181,6 +318,9 @@ type queueState struct { // to App Engine tasks in this queue // Structure is documented below. AppEngineRoutingOverride *QueueAppEngineRoutingOverride `pulumi:"appEngineRoutingOverride"` + // Modifies HTTP target for HTTP tasks. + // Structure is documented below. + HttpTarget *QueueHttpTarget `pulumi:"httpTarget"` // The location of the queue // // *** @@ -212,6 +352,9 @@ type QueueState struct { // to App Engine tasks in this queue // Structure is documented below. AppEngineRoutingOverride QueueAppEngineRoutingOverridePtrInput + // Modifies HTTP target for HTTP tasks. + // Structure is documented below. + HttpTarget QueueHttpTargetPtrInput // The location of the queue // // *** @@ -247,6 +390,9 @@ type queueArgs struct { // to App Engine tasks in this queue // Structure is documented below. AppEngineRoutingOverride *QueueAppEngineRoutingOverride `pulumi:"appEngineRoutingOverride"` + // Modifies HTTP target for HTTP tasks. + // Structure is documented below. 
+ HttpTarget *QueueHttpTarget `pulumi:"httpTarget"` // The location of the queue // // *** @@ -279,6 +425,9 @@ type QueueArgs struct { // to App Engine tasks in this queue // Structure is documented below. AppEngineRoutingOverride QueueAppEngineRoutingOverridePtrInput + // Modifies HTTP target for HTTP tasks. + // Structure is documented below. + HttpTarget QueueHttpTargetPtrInput // The location of the queue // // *** @@ -399,6 +548,12 @@ func (o QueueOutput) AppEngineRoutingOverride() QueueAppEngineRoutingOverridePtr return o.ApplyT(func(v *Queue) QueueAppEngineRoutingOverridePtrOutput { return v.AppEngineRoutingOverride }).(QueueAppEngineRoutingOverridePtrOutput) } +// Modifies HTTP target for HTTP tasks. +// Structure is documented below. +func (o QueueOutput) HttpTarget() QueueHttpTargetPtrOutput { + return o.ApplyT(func(v *Queue) QueueHttpTargetPtrOutput { return v.HttpTarget }).(QueueHttpTargetPtrOutput) +} + // The location of the queue // // *** diff --git a/sdk/go/gcp/compute/getInstance.go b/sdk/go/gcp/compute/getInstance.go index 94a4ba4faf..fe0dcecaec 100644 --- a/sdk/go/gcp/compute/getInstance.go +++ b/sdk/go/gcp/compute/getInstance.go @@ -81,7 +81,7 @@ type LookupInstanceResult struct { ConfidentialInstanceConfigs []GetInstanceConfidentialInstanceConfig `pulumi:"confidentialInstanceConfigs"` // The CPU platform used by this instance. CpuPlatform string `pulumi:"cpuPlatform"` - // The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + // The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). CurrentStatus string `pulumi:"currentStatus"` // Whether deletion protection is enabled on this instance. DeletionProtection bool `pulumi:"deletionProtection"` @@ -221,7 +221,7 @@ func (o LookupInstanceResultOutput) CpuPlatform() pulumi.StringOutput { return o.ApplyT(func(v LookupInstanceResult) string { return v.CpuPlatform }).(pulumi.StringOutput) } -// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, +// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). 
func (o LookupInstanceResultOutput) CurrentStatus() pulumi.StringOutput { return o.ApplyT(func(v LookupInstanceResult) string { return v.CurrentStatus }).(pulumi.StringOutput) } diff --git a/sdk/go/gcp/compute/healthCheck.go b/sdk/go/gcp/compute/healthCheck.go index b724f16467..bc84c8482b 100644 --- a/sdk/go/gcp/compute/healthCheck.go +++ b/sdk/go/gcp/compute/healthCheck.go @@ -468,6 +468,111 @@ import ( // } // // ``` +// ### Compute Health Check Http Source Regions +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := compute.NewHealthCheck(ctx, "http-health-check-with-source-regions", &compute.HealthCheckArgs{ +// Name: pulumi.String("http-health-check"), +// CheckIntervalSec: pulumi.Int(30), +// HttpHealthCheck: &compute.HealthCheckHttpHealthCheckArgs{ +// Port: pulumi.Int(80), +// PortSpecification: pulumi.String("USE_FIXED_PORT"), +// }, +// SourceRegions: pulumi.StringArray{ +// pulumi.String("us-west1"), +// pulumi.String("us-central1"), +// pulumi.String("us-east5"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// ### Compute Health Check Https Source Regions +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := compute.NewHealthCheck(ctx, "https-health-check-with-source-regions", &compute.HealthCheckArgs{ +// Name: pulumi.String("https-health-check"), +// CheckIntervalSec: pulumi.Int(30), +// HttpsHealthCheck: &compute.HealthCheckHttpsHealthCheckArgs{ +// Port: pulumi.Int(80), +// PortSpecification: pulumi.String("USE_FIXED_PORT"), +// }, +// SourceRegions: pulumi.StringArray{ +// pulumi.String("us-west1"), +// pulumi.String("us-central1"), +// pulumi.String("us-east5"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// ### Compute Health Check Tcp Source Regions +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := compute.NewHealthCheck(ctx, "tcp-health-check-with-source-regions", &compute.HealthCheckArgs{ +// Name: pulumi.String("tcp-health-check"), +// CheckIntervalSec: pulumi.Int(30), +// TcpHealthCheck: &compute.HealthCheckTcpHealthCheckArgs{ +// Port: pulumi.Int(80), +// PortSpecification: pulumi.String("USE_FIXED_PORT"), +// }, +// SourceRegions: pulumi.StringArray{ +// pulumi.String("us-west1"), +// pulumi.String("us-central1"), +// pulumi.String("us-east5"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // // ## Import // diff --git a/sdk/go/gcp/compute/instance.go b/sdk/go/gcp/compute/instance.go index 1d5e003a51..68fb944676 100644 --- a/sdk/go/gcp/compute/instance.go +++ b/sdk/go/gcp/compute/instance.go @@ -88,6 +88,76 @@ import ( // // ``` // +// ### Confidential Computing +// +// Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. 
+// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{ +// AccountId: pulumi.String("my-custom-sa"), +// DisplayName: pulumi.String("Custom SA for VM Instance"), +// }) +// if err != nil { +// return err +// } +// _, err = compute.NewInstance(ctx, "confidential_instance", &compute.InstanceArgs{ +// NetworkInterfaces: compute.InstanceNetworkInterfaceArray{ +// &compute.InstanceNetworkInterfaceArgs{ +// AccessConfigs: compute.InstanceNetworkInterfaceAccessConfigArray{ +// nil, +// }, +// Network: pulumi.String("default"), +// }, +// }, +// Name: pulumi.String("my-confidential-instance"), +// Zone: pulumi.String("us-central1-a"), +// MachineType: pulumi.String("n2d-standard-2"), +// MinCpuPlatform: pulumi.String("AMD Milan"), +// ConfidentialInstanceConfig: &compute.InstanceConfidentialInstanceConfigArgs{ +// EnableConfidentialCompute: pulumi.Bool(true), +// ConfidentialInstanceType: pulumi.String("SEV"), +// }, +// BootDisk: &compute.InstanceBootDiskArgs{ +// InitializeParams: &compute.InstanceBootDiskInitializeParamsArgs{ +// Image: pulumi.String("ubuntu-os-cloud/ubuntu-2004-lts"), +// Labels: pulumi.StringMap{ +// "my_label": pulumi.String("value"), +// }, +// }, +// }, +// ScratchDisks: compute.InstanceScratchDiskArray{ +// &compute.InstanceScratchDiskArgs{ +// Interface: pulumi.String("NVME"), +// }, +// }, +// ServiceAccount: &compute.InstanceServiceAccountArgs{ +// Email: _default.Email, +// Scopes: pulumi.StringArray{ +// pulumi.String("cloud-platform"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// // ## Import // // Instances can be imported using any of these accepted formats: @@ -132,7 +202,7 @@ type Instance struct { ConfidentialInstanceConfig InstanceConfidentialInstanceConfigOutput `pulumi:"confidentialInstanceConfig"` // The CPU platform used by this instance. CpuPlatform pulumi.StringOutput `pulumi:"cpuPlatform"` - // The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + // The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). CurrentStatus pulumi.StringOutput `pulumi:"currentStatus"` // Enable deletion protection on this instance. Defaults to false. // **Note:** you must disable deletion protection before removing the resource (e.g., via `pulumi destroy`), or the instance cannot be deleted and the provider run will not complete successfully. @@ -317,7 +387,7 @@ type instanceState struct { ConfidentialInstanceConfig *InstanceConfidentialInstanceConfig `pulumi:"confidentialInstanceConfig"` // The CPU platform used by this instance. CpuPlatform *string `pulumi:"cpuPlatform"` - // The current status of the instance. 
This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + // The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). CurrentStatus *string `pulumi:"currentStatus"` // Enable deletion protection on this instance. Defaults to false. // **Note:** you must disable deletion protection before removing the resource (e.g., via `pulumi destroy`), or the instance cannot be deleted and the provider run will not complete successfully. @@ -459,7 +529,7 @@ type InstanceState struct { ConfidentialInstanceConfig InstanceConfidentialInstanceConfigPtrInput // The CPU platform used by this instance. CpuPlatform pulumi.StringPtrInput - // The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + // The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). CurrentStatus pulumi.StringPtrInput // Enable deletion protection on this instance. Defaults to false. // **Note:** you must disable deletion protection before removing the resource (e.g., via `pulumi destroy`), or the instance cannot be deleted and the provider run will not complete successfully. @@ -961,7 +1031,7 @@ func (o InstanceOutput) CpuPlatform() pulumi.StringOutput { return o.ApplyT(func(v *Instance) pulumi.StringOutput { return v.CpuPlatform }).(pulumi.StringOutput) } -// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, +// The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). func (o InstanceOutput) CurrentStatus() pulumi.StringOutput { return o.ApplyT(func(v *Instance) pulumi.StringOutput { return v.CurrentStatus }).(pulumi.StringOutput) } diff --git a/sdk/go/gcp/compute/instanceTemplate.go b/sdk/go/gcp/compute/instanceTemplate.go index cc03a71afd..42b6af85c9 100644 --- a/sdk/go/gcp/compute/instanceTemplate.go +++ b/sdk/go/gcp/compute/instanceTemplate.go @@ -232,6 +232,68 @@ import ( // // ``` // +// ### Confidential Computing +// +// Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. 
+// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/serviceaccount" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{ +// AccountId: pulumi.String("my-custom-sa"), +// DisplayName: pulumi.String("Custom SA for VM Instance"), +// }) +// if err != nil { +// return err +// } +// _, err = compute.NewInstanceTemplate(ctx, "confidential_instance_template", &compute.InstanceTemplateArgs{ +// NetworkInterfaces: compute.InstanceTemplateNetworkInterfaceArray{ +// &compute.InstanceTemplateNetworkInterfaceArgs{ +// AccessConfigs: compute.InstanceTemplateNetworkInterfaceAccessConfigArray{ +// nil, +// }, +// Network: pulumi.String("default"), +// }, +// }, +// Name: pulumi.String("my-confidential-instance-template"), +// Region: pulumi.String("us-central1"), +// MachineType: pulumi.String("n2d-standard-2"), +// MinCpuPlatform: pulumi.String("AMD Milan"), +// ConfidentialInstanceConfig: &compute.InstanceTemplateConfidentialInstanceConfigArgs{ +// EnableConfidentialCompute: pulumi.Bool(true), +// ConfidentialInstanceType: pulumi.String("SEV"), +// }, +// Disks: compute.InstanceTemplateDiskArray{ +// &compute.InstanceTemplateDiskArgs{ +// SourceImage: pulumi.String("ubuntu-os-cloud/ubuntu-2004-lts"), +// }, +// }, +// ServiceAccount: &compute.InstanceTemplateServiceAccountArgs{ +// Email: _default.Email, +// Scopes: pulumi.StringArray{ +// pulumi.String("cloud-platform"), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ## Deploying the Latest Image +// +// A common way to use instance templates and managed instance groups is to deploy the diff --git a/sdk/go/gcp/compute/interconnect.go b/sdk/go/gcp/compute/interconnect.go index c476dfecad..08eeb6e33a 100644 --- a/sdk/go/gcp/compute/interconnect.go +++ b/sdk/go/gcp/compute/interconnect.go @@ -189,11 +189,12 @@ type Interconnect struct { // Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside // of Google's network that the interconnect is connected to. RemoteLocation pulumi.StringPtrOutput `pulumi:"remoteLocation"` - // interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + // interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( // If specified then the connection is created on MACsec capable hardware ports. If not // specified, the default value is false, which allocates non-MACsec capable ports first if - // available). - // Each value may be one of: `MACSEC`. + // available). Note that MACSEC is still technically allowed for compatibility reasons, but it + // does not work with the API, and will be removed in an upcoming major version. + // Each value may be one of: `MACSEC`, `IF_MACSEC`. RequestedFeatures pulumi.StringArrayOutput `pulumi:"requestedFeatures"` // Target number of physical links in the link bundle, as requested by the customer. RequestedLinkCount pulumi.IntOutput `pulumi:"requestedLinkCount"` @@ -362,11 +363,12 @@ type interconnectState struct { // Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside // of Google's network that the interconnect is connected to.
RemoteLocation *string `pulumi:"remoteLocation"` - // interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + // interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( // If specified then the connection is created on MACsec capable hardware ports. If not // specified, the default value is false, which allocates non-MACsec capable ports first if - // available). - // Each value may be one of: `MACSEC`. + // available). Note that MACSEC is still technically allowed for compatibility reasons, but it + // does not work with the API, and will be removed in an upcoming major version. + // Each value may be one of: `MACSEC`, `IF_MACSEC`. RequestedFeatures []string `pulumi:"requestedFeatures"` // Target number of physical links in the link bundle, as requested by the customer. RequestedLinkCount *int `pulumi:"requestedLinkCount"` @@ -486,11 +488,12 @@ type InterconnectState struct { // Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside // of Google's network that the interconnect is connected to. RemoteLocation pulumi.StringPtrInput - // interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + // interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( // If specified then the connection is created on MACsec capable hardware ports. If not // specified, the default value is false, which allocates non-MACsec capable ports first if - // available). - // Each value may be one of: `MACSEC`. + // available). Note that MACSEC is still technically allowed for compatibility reasons, but it + // does not work with the API, and will be removed in an upcoming major version. + // Each value may be one of: `MACSEC`, `IF_MACSEC`. RequestedFeatures pulumi.StringArrayInput // Target number of physical links in the link bundle, as requested by the customer. RequestedLinkCount pulumi.IntPtrInput @@ -568,11 +571,12 @@ type interconnectArgs struct { // Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside // of Google's network that the interconnect is connected to. RemoteLocation *string `pulumi:"remoteLocation"` - // interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + // interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( // If specified then the connection is created on MACsec capable hardware ports. If not // specified, the default value is false, which allocates non-MACsec capable ports first if - // available). - // Each value may be one of: `MACSEC`. + // available). Note that MACSEC is still technically allowed for compatibility reasons, but it + // does not work with the API, and will be removed in an upcoming major version. + // Each value may be one of: `MACSEC`, `IF_MACSEC`. RequestedFeatures []string `pulumi:"requestedFeatures"` // Target number of physical links in the link bundle, as requested by the customer. RequestedLinkCount int `pulumi:"requestedLinkCount"` @@ -636,11 +640,12 @@ type InterconnectArgs struct { // Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside // of Google's network that the interconnect is connected to. RemoteLocation pulumi.StringPtrInput - // interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + // interconnects.list of features requested for this Interconnect connection. 
Options: IF_MACSEC ( // If specified then the connection is created on MACsec capable hardware ports. If not // specified, the default value is false, which allocates non-MACsec capable ports first if - // available). - // Each value may be one of: `MACSEC`. + // available). Note that MACSEC is still technically allowed for compatibility reasons, but it + // does not work with the API, and will be removed in an upcoming major version. + // Each value may be one of: `MACSEC`, `IF_MACSEC`. RequestedFeatures pulumi.StringArrayInput // Target number of physical links in the link bundle, as requested by the customer. RequestedLinkCount pulumi.IntInput @@ -913,11 +918,12 @@ func (o InterconnectOutput) RemoteLocation() pulumi.StringPtrOutput { return o.ApplyT(func(v *Interconnect) pulumi.StringPtrOutput { return v.RemoteLocation }).(pulumi.StringPtrOutput) } -// interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( +// interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( // If specified then the connection is created on MACsec capable hardware ports. If not // specified, the default value is false, which allocates non-MACsec capable ports first if -// available). -// Each value may be one of: `MACSEC`. +// available). Note that MACSEC is still technically allowed for compatibility reasons, but it +// does not work with the API, and will be removed in an upcoming major version. +// Each value may be one of: `MACSEC`, `IF_MACSEC`. func (o InterconnectOutput) RequestedFeatures() pulumi.StringArrayOutput { return o.ApplyT(func(v *Interconnect) pulumi.StringArrayOutput { return v.RequestedFeatures }).(pulumi.StringArrayOutput) } diff --git a/sdk/go/gcp/compute/nodeTemplate.go b/sdk/go/gcp/compute/nodeTemplate.go index a03bd1dbf5..2d5d54ab77 100644 --- a/sdk/go/gcp/compute/nodeTemplate.go +++ b/sdk/go/gcp/compute/nodeTemplate.go @@ -89,6 +89,45 @@ import ( // } // // ``` +// ### Node Template Accelerators +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := compute.GetNodeTypes(ctx, &compute.GetNodeTypesArgs{ +// Zone: pulumi.StringRef("us-central1-a"), +// }, nil) +// if err != nil { +// return err +// } +// _, err = compute.NewNodeTemplate(ctx, "template", &compute.NodeTemplateArgs{ +// Name: pulumi.String("soletenant-with-accelerators"), +// Region: pulumi.String("us-central1"), +// NodeType: pulumi.String("n1-node-96-624"), +// Accelerators: compute.NodeTemplateAcceleratorArray{ +// &compute.NodeTemplateAcceleratorArgs{ +// AcceleratorType: pulumi.String("nvidia-tesla-t4"), +// AcceleratorCount: pulumi.Int(4), +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // // ## Import // @@ -122,6 +161,10 @@ import ( type NodeTemplate struct { pulumi.CustomResourceState + // List of the type and count of accelerator cards attached to the + // node template + // Structure is documented below. + Accelerators NodeTemplateAcceleratorArrayOutput `pulumi:"accelerators"` // CPU overcommit. // Default value is `NONE`. // Possible values are: `ENABLED`, `NONE`. @@ -188,6 +231,10 @@ func GetNodeTemplate(ctx *pulumi.Context, // Input properties used for looking up and filtering NodeTemplate resources. 
type nodeTemplateState struct { + // List of the type and count of accelerator cards attached to the + // node template + // Structure is documented below. + Accelerators []NodeTemplateAccelerator `pulumi:"accelerators"` // CPU overcommit. // Default value is `NONE`. // Possible values are: `ENABLED`, `NONE`. @@ -225,6 +272,10 @@ type nodeTemplateState struct { } type NodeTemplateState struct { + // List of the type and count of accelerator cards attached to the + // node template + // Structure is documented below. + Accelerators NodeTemplateAcceleratorArrayInput // CPU overcommit. // Default value is `NONE`. // Possible values are: `ENABLED`, `NONE`. @@ -266,6 +317,10 @@ func (NodeTemplateState) ElementType() reflect.Type { } type nodeTemplateArgs struct { + // List of the type and count of accelerator cards attached to the + // node template + // Structure is documented below. + Accelerators []NodeTemplateAccelerator `pulumi:"accelerators"` // CPU overcommit. // Default value is `NONE`. // Possible values are: `ENABLED`, `NONE`. @@ -300,6 +355,10 @@ type nodeTemplateArgs struct { // The set of arguments for constructing a NodeTemplate resource. type NodeTemplateArgs struct { + // List of the type and count of accelerator cards attached to the + // node template + // Structure is documented below. + Accelerators NodeTemplateAcceleratorArrayInput // CPU overcommit. // Default value is `NONE`. // Possible values are: `ENABLED`, `NONE`. @@ -419,6 +478,13 @@ func (o NodeTemplateOutput) ToNodeTemplateOutputWithContext(ctx context.Context) return o } +// List of the type and count of accelerator cards attached to the +// node template +// Structure is documented below. +func (o NodeTemplateOutput) Accelerators() NodeTemplateAcceleratorArrayOutput { + return o.ApplyT(func(v *NodeTemplate) NodeTemplateAcceleratorArrayOutput { return v.Accelerators }).(NodeTemplateAcceleratorArrayOutput) +} + // CPU overcommit. // Default value is `NONE`. // Possible values are: `ENABLED`, `NONE`. diff --git a/sdk/go/gcp/compute/pulumiTypes.go b/sdk/go/gcp/compute/pulumiTypes.go index 4c69b0b67e..36aec28292 100644 --- a/sdk/go/gcp/compute/pulumiTypes.go +++ b/sdk/go/gcp/compute/pulumiTypes.go @@ -9213,7 +9213,7 @@ type FirewallAllow struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. - // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. Ports []string `pulumi:"ports"` // The IP protocol to which this rule applies. The protocol type is @@ -9239,7 +9239,7 @@ type FirewallAllowArgs struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. - // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. Ports pulumi.StringArrayInput `pulumi:"ports"` // The IP protocol to which this rule applies. The protocol type is @@ -9304,7 +9304,7 @@ func (o FirewallAllowOutput) ToFirewallAllowOutputWithContext(ctx context.Contex // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. -// Example inputs include: ["22"], ["80","443"], and +// Example inputs include: [22], [80, 443], and // ["12345-12349"]. 
func (o FirewallAllowOutput) Ports() pulumi.StringArrayOutput { return o.ApplyT(func(v FirewallAllow) []string { return v.Ports }).(pulumi.StringArrayOutput) @@ -9343,7 +9343,7 @@ type FirewallDeny struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. - // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. Ports []string `pulumi:"ports"` // The IP protocol to which this rule applies. The protocol type is @@ -9369,7 +9369,7 @@ type FirewallDenyArgs struct { // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. - // Example inputs include: ["22"], ["80","443"], and + // Example inputs include: [22], [80, 443], and // ["12345-12349"]. Ports pulumi.StringArrayInput `pulumi:"ports"` // The IP protocol to which this rule applies. The protocol type is @@ -9434,7 +9434,7 @@ func (o FirewallDenyOutput) ToFirewallDenyOutputWithContext(ctx context.Context) // is only applicable for UDP or TCP protocol. Each entry must be // either an integer or a range. If not specified, this rule // applies to connections through any port. -// Example inputs include: ["22"], ["80","443"], and +// Example inputs include: [22], [80, 443], and // ["12345-12349"]. func (o FirewallDenyOutput) Ports() pulumi.StringArrayOutput { return o.ApplyT(func(v FirewallDeny) []string { return v.Ports }).(pulumi.StringArrayOutput) @@ -14006,6 +14006,8 @@ type InstanceBootDisk struct { // alongside the new instance. Either `initializeParams` or `source` must be set. // Structure is documented below. InitializeParams *InstanceBootDiskInitializeParams `pulumi:"initializeParams"` + // The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + Interface *string `pulumi:"interface"` // The selfLink of the encryption key that is // stored in Google Cloud KMS to encrypt this disk. Only one of `kmsKeySelfLink` // and `diskEncryptionKeyRaw` may be set. @@ -14051,6 +14053,8 @@ type InstanceBootDiskArgs struct { // alongside the new instance. Either `initializeParams` or `source` must be set. // Structure is documented below. InitializeParams InstanceBootDiskInitializeParamsPtrInput `pulumi:"initializeParams"` + // The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + Interface pulumi.StringPtrInput `pulumi:"interface"` // The selfLink of the encryption key that is // stored in Google Cloud KMS to encrypt this disk. Only one of `kmsKeySelfLink` // and `diskEncryptionKeyRaw` may be set. @@ -14176,6 +14180,11 @@ func (o InstanceBootDiskOutput) InitializeParams() InstanceBootDiskInitializePar return o.ApplyT(func(v InstanceBootDisk) *InstanceBootDiskInitializeParams { return v.InitializeParams }).(InstanceBootDiskInitializeParamsPtrOutput) } +// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) 
+func (o InstanceBootDiskOutput) Interface() pulumi.StringPtrOutput { + return o.ApplyT(func(v InstanceBootDisk) *string { return v.Interface }).(pulumi.StringPtrOutput) +} + // The selfLink of the encryption key that is // stored in Google Cloud KMS to encrypt this disk. Only one of `kmsKeySelfLink` // and `diskEncryptionKeyRaw` may be set. @@ -14280,6 +14289,16 @@ func (o InstanceBootDiskPtrOutput) InitializeParams() InstanceBootDiskInitialize }).(InstanceBootDiskInitializeParamsPtrOutput) } +// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) +func (o InstanceBootDiskPtrOutput) Interface() pulumi.StringPtrOutput { + return o.ApplyT(func(v *InstanceBootDisk) *string { + if v == nil { + return nil + } + return v.Interface + }).(pulumi.StringPtrOutput) +} + // The selfLink of the encryption key that is // stored in Google Cloud KMS to encrypt this disk. Only one of `kmsKeySelfLink` // and `diskEncryptionKeyRaw` may be set. @@ -15192,6 +15211,8 @@ type InstanceFromMachineImageBootDisk struct { DiskEncryptionKeySha256 *string `pulumi:"diskEncryptionKeySha256"` // Parameters with which a disk was created alongside the instance. InitializeParams *InstanceFromMachineImageBootDiskInitializeParams `pulumi:"initializeParams"` + // The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + Interface *string `pulumi:"interface"` // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. KmsKeySelfLink *string `pulumi:"kmsKeySelfLink"` // Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". @@ -15222,6 +15243,8 @@ type InstanceFromMachineImageBootDiskArgs struct { DiskEncryptionKeySha256 pulumi.StringPtrInput `pulumi:"diskEncryptionKeySha256"` // Parameters with which a disk was created alongside the instance. InitializeParams InstanceFromMachineImageBootDiskInitializeParamsPtrInput `pulumi:"initializeParams"` + // The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + Interface pulumi.StringPtrInput `pulumi:"interface"` // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. KmsKeySelfLink pulumi.StringPtrInput `pulumi:"kmsKeySelfLink"` // Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". @@ -15308,6 +15331,11 @@ func (o InstanceFromMachineImageBootDiskOutput) InitializeParams() InstanceFromM }).(InstanceFromMachineImageBootDiskInitializeParamsPtrOutput) } +// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) +func (o InstanceFromMachineImageBootDiskOutput) Interface() pulumi.StringPtrOutput { + return o.ApplyT(func(v InstanceFromMachineImageBootDisk) *string { return v.Interface }).(pulumi.StringPtrOutput) +} + // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. 
func (o InstanceFromMachineImageBootDiskOutput) KmsKeySelfLink() pulumi.StringPtrOutput { return o.ApplyT(func(v InstanceFromMachineImageBootDisk) *string { return v.KmsKeySelfLink }).(pulumi.StringPtrOutput) @@ -18822,6 +18850,8 @@ type InstanceFromTemplateBootDisk struct { DiskEncryptionKeySha256 *string `pulumi:"diskEncryptionKeySha256"` // Parameters with which a disk was created alongside the instance. InitializeParams *InstanceFromTemplateBootDiskInitializeParams `pulumi:"initializeParams"` + // The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + Interface *string `pulumi:"interface"` // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. KmsKeySelfLink *string `pulumi:"kmsKeySelfLink"` // Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". @@ -18852,6 +18882,8 @@ type InstanceFromTemplateBootDiskArgs struct { DiskEncryptionKeySha256 pulumi.StringPtrInput `pulumi:"diskEncryptionKeySha256"` // Parameters with which a disk was created alongside the instance. InitializeParams InstanceFromTemplateBootDiskInitializeParamsPtrInput `pulumi:"initializeParams"` + // The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + Interface pulumi.StringPtrInput `pulumi:"interface"` // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. KmsKeySelfLink pulumi.StringPtrInput `pulumi:"kmsKeySelfLink"` // Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". @@ -18964,6 +18996,11 @@ func (o InstanceFromTemplateBootDiskOutput) InitializeParams() InstanceFromTempl }).(InstanceFromTemplateBootDiskInitializeParamsPtrOutput) } +// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) +func (o InstanceFromTemplateBootDiskOutput) Interface() pulumi.StringPtrOutput { + return o.ApplyT(func(v InstanceFromTemplateBootDisk) *string { return v.Interface }).(pulumi.StringPtrOutput) +} + // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. func (o InstanceFromTemplateBootDiskOutput) KmsKeySelfLink() pulumi.StringPtrOutput { return o.ApplyT(func(v InstanceFromTemplateBootDisk) *string { return v.KmsKeySelfLink }).(pulumi.StringPtrOutput) @@ -19053,6 +19090,16 @@ func (o InstanceFromTemplateBootDiskPtrOutput) InitializeParams() InstanceFromTe }).(InstanceFromTemplateBootDiskInitializeParamsPtrOutput) } +// The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) +func (o InstanceFromTemplateBootDiskPtrOutput) Interface() pulumi.StringPtrOutput { + return o.ApplyT(func(v *InstanceFromTemplateBootDisk) *string { + if v == nil { + return nil + } + return v.Interface + }).(pulumi.StringPtrOutput) +} + // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. 
Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. func (o InstanceFromTemplateBootDiskPtrOutput) KmsKeySelfLink() pulumi.StringPtrOutput { return o.ApplyT(func(v *InstanceFromTemplateBootDisk) *string { @@ -25202,7 +25249,7 @@ type InstanceNetworkInterface struct { // in custom subnet mode, specifying the subnetwork is required. Subnetwork *string `pulumi:"subnetwork"` // The project in which the subnetwork belongs. - // If the `subnetwork` is a self_link, this field is ignored in favor of the project + // If the `subnetwork` is a self_link, this field is set to the project // defined in the subnetwork self_link. If the `subnetwork` is a name and this // field is not provided, the provider project is used. SubnetworkProject *string `pulumi:"subnetworkProject"` @@ -25266,7 +25313,7 @@ type InstanceNetworkInterfaceArgs struct { // in custom subnet mode, specifying the subnetwork is required. Subnetwork pulumi.StringPtrInput `pulumi:"subnetwork"` // The project in which the subnetwork belongs. - // If the `subnetwork` is a self_link, this field is ignored in favor of the project + // If the `subnetwork` is a self_link, this field is set to the project // defined in the subnetwork self_link. If the `subnetwork` is a name and this // field is not provided, the provider project is used. SubnetworkProject pulumi.StringPtrInput `pulumi:"subnetworkProject"` @@ -25416,7 +25463,7 @@ func (o InstanceNetworkInterfaceOutput) Subnetwork() pulumi.StringPtrOutput { } // The project in which the subnetwork belongs. -// If the `subnetwork` is a self_link, this field is ignored in favor of the project +// If the `subnetwork` is a self_link, this field is set to the project // defined in the subnetwork self_link. If the `subnetwork` is a name and this // field is not provided, the provider project is used. func (o InstanceNetworkInterfaceOutput) SubnetworkProject() pulumi.StringPtrOutput { @@ -34957,6 +35004,118 @@ func (o NodeGroupShareSettingsProjectMapArrayOutput) Index(i pulumi.IntInput) No }).(NodeGroupShareSettingsProjectMapOutput) } +type NodeTemplateAccelerator struct { + // The number of the guest accelerator cards exposed to this + // node template. + AcceleratorCount *int `pulumi:"acceleratorCount"` + // Full or partial URL of the accelerator type resource to expose + // to this node template. + AcceleratorType *string `pulumi:"acceleratorType"` +} + +// NodeTemplateAcceleratorInput is an input type that accepts NodeTemplateAcceleratorArgs and NodeTemplateAcceleratorOutput values. +// You can construct a concrete instance of `NodeTemplateAcceleratorInput` via: +// +// NodeTemplateAcceleratorArgs{...} +type NodeTemplateAcceleratorInput interface { + pulumi.Input + + ToNodeTemplateAcceleratorOutput() NodeTemplateAcceleratorOutput + ToNodeTemplateAcceleratorOutputWithContext(context.Context) NodeTemplateAcceleratorOutput +} + +type NodeTemplateAcceleratorArgs struct { + // The number of the guest accelerator cards exposed to this + // node template. + AcceleratorCount pulumi.IntPtrInput `pulumi:"acceleratorCount"` + // Full or partial URL of the accelerator type resource to expose + // to this node template. 
+ AcceleratorType pulumi.StringPtrInput `pulumi:"acceleratorType"` +} + +func (NodeTemplateAcceleratorArgs) ElementType() reflect.Type { + return reflect.TypeOf((*NodeTemplateAccelerator)(nil)).Elem() +} + +func (i NodeTemplateAcceleratorArgs) ToNodeTemplateAcceleratorOutput() NodeTemplateAcceleratorOutput { + return i.ToNodeTemplateAcceleratorOutputWithContext(context.Background()) +} + +func (i NodeTemplateAcceleratorArgs) ToNodeTemplateAcceleratorOutputWithContext(ctx context.Context) NodeTemplateAcceleratorOutput { + return pulumi.ToOutputWithContext(ctx, i).(NodeTemplateAcceleratorOutput) +} + +// NodeTemplateAcceleratorArrayInput is an input type that accepts NodeTemplateAcceleratorArray and NodeTemplateAcceleratorArrayOutput values. +// You can construct a concrete instance of `NodeTemplateAcceleratorArrayInput` via: +// +// NodeTemplateAcceleratorArray{ NodeTemplateAcceleratorArgs{...} } +type NodeTemplateAcceleratorArrayInput interface { + pulumi.Input + + ToNodeTemplateAcceleratorArrayOutput() NodeTemplateAcceleratorArrayOutput + ToNodeTemplateAcceleratorArrayOutputWithContext(context.Context) NodeTemplateAcceleratorArrayOutput +} + +type NodeTemplateAcceleratorArray []NodeTemplateAcceleratorInput + +func (NodeTemplateAcceleratorArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]NodeTemplateAccelerator)(nil)).Elem() +} + +func (i NodeTemplateAcceleratorArray) ToNodeTemplateAcceleratorArrayOutput() NodeTemplateAcceleratorArrayOutput { + return i.ToNodeTemplateAcceleratorArrayOutputWithContext(context.Background()) +} + +func (i NodeTemplateAcceleratorArray) ToNodeTemplateAcceleratorArrayOutputWithContext(ctx context.Context) NodeTemplateAcceleratorArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(NodeTemplateAcceleratorArrayOutput) +} + +type NodeTemplateAcceleratorOutput struct{ *pulumi.OutputState } + +func (NodeTemplateAcceleratorOutput) ElementType() reflect.Type { + return reflect.TypeOf((*NodeTemplateAccelerator)(nil)).Elem() +} + +func (o NodeTemplateAcceleratorOutput) ToNodeTemplateAcceleratorOutput() NodeTemplateAcceleratorOutput { + return o +} + +func (o NodeTemplateAcceleratorOutput) ToNodeTemplateAcceleratorOutputWithContext(ctx context.Context) NodeTemplateAcceleratorOutput { + return o +} + +// The number of the guest accelerator cards exposed to this +// node template. +func (o NodeTemplateAcceleratorOutput) AcceleratorCount() pulumi.IntPtrOutput { + return o.ApplyT(func(v NodeTemplateAccelerator) *int { return v.AcceleratorCount }).(pulumi.IntPtrOutput) +} + +// Full or partial URL of the accelerator type resource to expose +// to this node template. 
+func (o NodeTemplateAcceleratorOutput) AcceleratorType() pulumi.StringPtrOutput { + return o.ApplyT(func(v NodeTemplateAccelerator) *string { return v.AcceleratorType }).(pulumi.StringPtrOutput) +} + +type NodeTemplateAcceleratorArrayOutput struct{ *pulumi.OutputState } + +func (NodeTemplateAcceleratorArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]NodeTemplateAccelerator)(nil)).Elem() +} + +func (o NodeTemplateAcceleratorArrayOutput) ToNodeTemplateAcceleratorArrayOutput() NodeTemplateAcceleratorArrayOutput { + return o +} + +func (o NodeTemplateAcceleratorArrayOutput) ToNodeTemplateAcceleratorArrayOutputWithContext(ctx context.Context) NodeTemplateAcceleratorArrayOutput { + return o +} + +func (o NodeTemplateAcceleratorArrayOutput) Index(i pulumi.IntInput) NodeTemplateAcceleratorOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) NodeTemplateAccelerator { + return vs[0].([]NodeTemplateAccelerator)[vs[1].(int)] + }).(NodeTemplateAcceleratorOutput) +} + type NodeTemplateNodeTypeFlexibility struct { // Number of virtual CPUs to use. Cpus *string `pulumi:"cpus"` @@ -88864,216 +89023,6 @@ func (o URLMapDefaultUrlRedirectPtrOutput) StripQuery() pulumi.BoolPtrOutput { }).(pulumi.BoolPtrOutput) } -type URLMapHeaderAction struct { - // Headers to add to a matching request prior to forwarding the request to the backendService. - // Structure is documented below. - RequestHeadersToAdds []URLMapHeaderActionRequestHeadersToAdd `pulumi:"requestHeadersToAdds"` - // A list of header names for headers that need to be removed from the request prior to - // forwarding the request to the backendService. - RequestHeadersToRemoves []string `pulumi:"requestHeadersToRemoves"` - // Headers to add the response prior to sending the response back to the client. - // Structure is documented below. - ResponseHeadersToAdds []URLMapHeaderActionResponseHeadersToAdd `pulumi:"responseHeadersToAdds"` - // A list of header names for headers that need to be removed from the response prior to sending the - // response back to the client. - ResponseHeadersToRemoves []string `pulumi:"responseHeadersToRemoves"` -} - -// URLMapHeaderActionInput is an input type that accepts URLMapHeaderActionArgs and URLMapHeaderActionOutput values. -// You can construct a concrete instance of `URLMapHeaderActionInput` via: -// -// URLMapHeaderActionArgs{...} -type URLMapHeaderActionInput interface { - pulumi.Input - - ToURLMapHeaderActionOutput() URLMapHeaderActionOutput - ToURLMapHeaderActionOutputWithContext(context.Context) URLMapHeaderActionOutput -} - -type URLMapHeaderActionArgs struct { - // Headers to add to a matching request prior to forwarding the request to the backendService. - // Structure is documented below. - RequestHeadersToAdds URLMapHeaderActionRequestHeadersToAddArrayInput `pulumi:"requestHeadersToAdds"` - // A list of header names for headers that need to be removed from the request prior to - // forwarding the request to the backendService. - RequestHeadersToRemoves pulumi.StringArrayInput `pulumi:"requestHeadersToRemoves"` - // Headers to add the response prior to sending the response back to the client. - // Structure is documented below. - ResponseHeadersToAdds URLMapHeaderActionResponseHeadersToAddArrayInput `pulumi:"responseHeadersToAdds"` - // A list of header names for headers that need to be removed from the response prior to sending the - // response back to the client. 
- ResponseHeadersToRemoves pulumi.StringArrayInput `pulumi:"responseHeadersToRemoves"` -} - -func (URLMapHeaderActionArgs) ElementType() reflect.Type { - return reflect.TypeOf((*URLMapHeaderAction)(nil)).Elem() -} - -func (i URLMapHeaderActionArgs) ToURLMapHeaderActionOutput() URLMapHeaderActionOutput { - return i.ToURLMapHeaderActionOutputWithContext(context.Background()) -} - -func (i URLMapHeaderActionArgs) ToURLMapHeaderActionOutputWithContext(ctx context.Context) URLMapHeaderActionOutput { - return pulumi.ToOutputWithContext(ctx, i).(URLMapHeaderActionOutput) -} - -func (i URLMapHeaderActionArgs) ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput { - return i.ToURLMapHeaderActionPtrOutputWithContext(context.Background()) -} - -func (i URLMapHeaderActionArgs) ToURLMapHeaderActionPtrOutputWithContext(ctx context.Context) URLMapHeaderActionPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(URLMapHeaderActionOutput).ToURLMapHeaderActionPtrOutputWithContext(ctx) -} - -// URLMapHeaderActionPtrInput is an input type that accepts URLMapHeaderActionArgs, URLMapHeaderActionPtr and URLMapHeaderActionPtrOutput values. -// You can construct a concrete instance of `URLMapHeaderActionPtrInput` via: -// -// URLMapHeaderActionArgs{...} -// -// or: -// -// nil -type URLMapHeaderActionPtrInput interface { - pulumi.Input - - ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput - ToURLMapHeaderActionPtrOutputWithContext(context.Context) URLMapHeaderActionPtrOutput -} - -type urlmapHeaderActionPtrType URLMapHeaderActionArgs - -func URLMapHeaderActionPtr(v *URLMapHeaderActionArgs) URLMapHeaderActionPtrInput { - return (*urlmapHeaderActionPtrType)(v) -} - -func (*urlmapHeaderActionPtrType) ElementType() reflect.Type { - return reflect.TypeOf((**URLMapHeaderAction)(nil)).Elem() -} - -func (i *urlmapHeaderActionPtrType) ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput { - return i.ToURLMapHeaderActionPtrOutputWithContext(context.Background()) -} - -func (i *urlmapHeaderActionPtrType) ToURLMapHeaderActionPtrOutputWithContext(ctx context.Context) URLMapHeaderActionPtrOutput { - return pulumi.ToOutputWithContext(ctx, i).(URLMapHeaderActionPtrOutput) -} - -type URLMapHeaderActionOutput struct{ *pulumi.OutputState } - -func (URLMapHeaderActionOutput) ElementType() reflect.Type { - return reflect.TypeOf((*URLMapHeaderAction)(nil)).Elem() -} - -func (o URLMapHeaderActionOutput) ToURLMapHeaderActionOutput() URLMapHeaderActionOutput { - return o -} - -func (o URLMapHeaderActionOutput) ToURLMapHeaderActionOutputWithContext(ctx context.Context) URLMapHeaderActionOutput { - return o -} - -func (o URLMapHeaderActionOutput) ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput { - return o.ToURLMapHeaderActionPtrOutputWithContext(context.Background()) -} - -func (o URLMapHeaderActionOutput) ToURLMapHeaderActionPtrOutputWithContext(ctx context.Context) URLMapHeaderActionPtrOutput { - return o.ApplyTWithContext(ctx, func(_ context.Context, v URLMapHeaderAction) *URLMapHeaderAction { - return &v - }).(URLMapHeaderActionPtrOutput) -} - -// Headers to add to a matching request prior to forwarding the request to the backendService. -// Structure is documented below. 
-func (o URLMapHeaderActionOutput) RequestHeadersToAdds() URLMapHeaderActionRequestHeadersToAddArrayOutput { - return o.ApplyT(func(v URLMapHeaderAction) []URLMapHeaderActionRequestHeadersToAdd { return v.RequestHeadersToAdds }).(URLMapHeaderActionRequestHeadersToAddArrayOutput) -} - -// A list of header names for headers that need to be removed from the request prior to -// forwarding the request to the backendService. -func (o URLMapHeaderActionOutput) RequestHeadersToRemoves() pulumi.StringArrayOutput { - return o.ApplyT(func(v URLMapHeaderAction) []string { return v.RequestHeadersToRemoves }).(pulumi.StringArrayOutput) -} - -// Headers to add the response prior to sending the response back to the client. -// Structure is documented below. -func (o URLMapHeaderActionOutput) ResponseHeadersToAdds() URLMapHeaderActionResponseHeadersToAddArrayOutput { - return o.ApplyT(func(v URLMapHeaderAction) []URLMapHeaderActionResponseHeadersToAdd { return v.ResponseHeadersToAdds }).(URLMapHeaderActionResponseHeadersToAddArrayOutput) -} - -// A list of header names for headers that need to be removed from the response prior to sending the -// response back to the client. -func (o URLMapHeaderActionOutput) ResponseHeadersToRemoves() pulumi.StringArrayOutput { - return o.ApplyT(func(v URLMapHeaderAction) []string { return v.ResponseHeadersToRemoves }).(pulumi.StringArrayOutput) -} - -type URLMapHeaderActionPtrOutput struct{ *pulumi.OutputState } - -func (URLMapHeaderActionPtrOutput) ElementType() reflect.Type { - return reflect.TypeOf((**URLMapHeaderAction)(nil)).Elem() -} - -func (o URLMapHeaderActionPtrOutput) ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput { - return o -} - -func (o URLMapHeaderActionPtrOutput) ToURLMapHeaderActionPtrOutputWithContext(ctx context.Context) URLMapHeaderActionPtrOutput { - return o -} - -func (o URLMapHeaderActionPtrOutput) Elem() URLMapHeaderActionOutput { - return o.ApplyT(func(v *URLMapHeaderAction) URLMapHeaderAction { - if v != nil { - return *v - } - var ret URLMapHeaderAction - return ret - }).(URLMapHeaderActionOutput) -} - -// Headers to add to a matching request prior to forwarding the request to the backendService. -// Structure is documented below. -func (o URLMapHeaderActionPtrOutput) RequestHeadersToAdds() URLMapHeaderActionRequestHeadersToAddArrayOutput { - return o.ApplyT(func(v *URLMapHeaderAction) []URLMapHeaderActionRequestHeadersToAdd { - if v == nil { - return nil - } - return v.RequestHeadersToAdds - }).(URLMapHeaderActionRequestHeadersToAddArrayOutput) -} - -// A list of header names for headers that need to be removed from the request prior to -// forwarding the request to the backendService. -func (o URLMapHeaderActionPtrOutput) RequestHeadersToRemoves() pulumi.StringArrayOutput { - return o.ApplyT(func(v *URLMapHeaderAction) []string { - if v == nil { - return nil - } - return v.RequestHeadersToRemoves - }).(pulumi.StringArrayOutput) -} - -// Headers to add the response prior to sending the response back to the client. -// Structure is documented below. -func (o URLMapHeaderActionPtrOutput) ResponseHeadersToAdds() URLMapHeaderActionResponseHeadersToAddArrayOutput { - return o.ApplyT(func(v *URLMapHeaderAction) []URLMapHeaderActionResponseHeadersToAdd { - if v == nil { - return nil - } - return v.ResponseHeadersToAdds - }).(URLMapHeaderActionResponseHeadersToAddArrayOutput) -} - -// A list of header names for headers that need to be removed from the response prior to sending the -// response back to the client. 
-func (o URLMapHeaderActionPtrOutput) ResponseHeadersToRemoves() pulumi.StringArrayOutput { - return o.ApplyT(func(v *URLMapHeaderAction) []string { - if v == nil { - return nil - } - return v.ResponseHeadersToRemoves - }).(pulumi.StringArrayOutput) -} - func init() { pulumi.RegisterInputType(reflect.TypeOf((*AutoscalerAutoscalingPolicyInput)(nil)).Elem(), AutoscalerAutoscalingPolicyArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*AutoscalerAutoscalingPolicyPtrInput)(nil)).Elem(), AutoscalerAutoscalingPolicyArgs{}) @@ -89467,6 +89416,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*NodeGroupShareSettingsPtrInput)(nil)).Elem(), NodeGroupShareSettingsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*NodeGroupShareSettingsProjectMapInput)(nil)).Elem(), NodeGroupShareSettingsProjectMapArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*NodeGroupShareSettingsProjectMapArrayInput)(nil)).Elem(), NodeGroupShareSettingsProjectMapArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*NodeTemplateAcceleratorInput)(nil)).Elem(), NodeTemplateAcceleratorArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*NodeTemplateAcceleratorArrayInput)(nil)).Elem(), NodeTemplateAcceleratorArray{}) pulumi.RegisterInputType(reflect.TypeOf((*NodeTemplateNodeTypeFlexibilityInput)(nil)).Elem(), NodeTemplateNodeTypeFlexibilityArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*NodeTemplateNodeTypeFlexibilityPtrInput)(nil)).Elem(), NodeTemplateNodeTypeFlexibilityArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*NodeTemplateServerBindingInput)(nil)).Elem(), NodeTemplateServerBindingArgs{}) @@ -90073,8 +90024,6 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*URLMapDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAddArrayInput)(nil)).Elem(), URLMapDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAddArray{}) pulumi.RegisterInputType(reflect.TypeOf((*URLMapDefaultUrlRedirectInput)(nil)).Elem(), URLMapDefaultUrlRedirectArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*URLMapDefaultUrlRedirectPtrInput)(nil)).Elem(), URLMapDefaultUrlRedirectArgs{}) - pulumi.RegisterInputType(reflect.TypeOf((*URLMapHeaderActionInput)(nil)).Elem(), URLMapHeaderActionArgs{}) - pulumi.RegisterInputType(reflect.TypeOf((*URLMapHeaderActionPtrInput)(nil)).Elem(), URLMapHeaderActionArgs{}) pulumi.RegisterOutputType(AutoscalerAutoscalingPolicyOutput{}) pulumi.RegisterOutputType(AutoscalerAutoscalingPolicyPtrOutput{}) pulumi.RegisterOutputType(AutoscalerAutoscalingPolicyCpuUtilizationOutput{}) @@ -90467,6 +90416,8 @@ func init() { pulumi.RegisterOutputType(NodeGroupShareSettingsPtrOutput{}) pulumi.RegisterOutputType(NodeGroupShareSettingsProjectMapOutput{}) pulumi.RegisterOutputType(NodeGroupShareSettingsProjectMapArrayOutput{}) + pulumi.RegisterOutputType(NodeTemplateAcceleratorOutput{}) + pulumi.RegisterOutputType(NodeTemplateAcceleratorArrayOutput{}) pulumi.RegisterOutputType(NodeTemplateNodeTypeFlexibilityOutput{}) pulumi.RegisterOutputType(NodeTemplateNodeTypeFlexibilityPtrOutput{}) pulumi.RegisterOutputType(NodeTemplateServerBindingOutput{}) @@ -91073,6 +91024,4 @@ func init() { pulumi.RegisterOutputType(URLMapDefaultRouteActionWeightedBackendServiceHeaderActionResponseHeadersToAddArrayOutput{}) pulumi.RegisterOutputType(URLMapDefaultUrlRedirectOutput{}) pulumi.RegisterOutputType(URLMapDefaultUrlRedirectPtrOutput{}) - pulumi.RegisterOutputType(URLMapHeaderActionOutput{}) - pulumi.RegisterOutputType(URLMapHeaderActionPtrOutput{}) } diff --git a/sdk/go/gcp/compute/pulumiTypes1.go 
b/sdk/go/gcp/compute/pulumiTypes1.go index 862e7b4355..6a9749e289 100644 --- a/sdk/go/gcp/compute/pulumiTypes1.go +++ b/sdk/go/gcp/compute/pulumiTypes1.go @@ -13,6 +13,216 @@ import ( var _ = internal.GetEnvOrDefault +type URLMapHeaderAction struct { + // Headers to add to a matching request prior to forwarding the request to the backendService. + // Structure is documented below. + RequestHeadersToAdds []URLMapHeaderActionRequestHeadersToAdd `pulumi:"requestHeadersToAdds"` + // A list of header names for headers that need to be removed from the request prior to + // forwarding the request to the backendService. + RequestHeadersToRemoves []string `pulumi:"requestHeadersToRemoves"` + // Headers to add the response prior to sending the response back to the client. + // Structure is documented below. + ResponseHeadersToAdds []URLMapHeaderActionResponseHeadersToAdd `pulumi:"responseHeadersToAdds"` + // A list of header names for headers that need to be removed from the response prior to sending the + // response back to the client. + ResponseHeadersToRemoves []string `pulumi:"responseHeadersToRemoves"` +} + +// URLMapHeaderActionInput is an input type that accepts URLMapHeaderActionArgs and URLMapHeaderActionOutput values. +// You can construct a concrete instance of `URLMapHeaderActionInput` via: +// +// URLMapHeaderActionArgs{...} +type URLMapHeaderActionInput interface { + pulumi.Input + + ToURLMapHeaderActionOutput() URLMapHeaderActionOutput + ToURLMapHeaderActionOutputWithContext(context.Context) URLMapHeaderActionOutput +} + +type URLMapHeaderActionArgs struct { + // Headers to add to a matching request prior to forwarding the request to the backendService. + // Structure is documented below. + RequestHeadersToAdds URLMapHeaderActionRequestHeadersToAddArrayInput `pulumi:"requestHeadersToAdds"` + // A list of header names for headers that need to be removed from the request prior to + // forwarding the request to the backendService. + RequestHeadersToRemoves pulumi.StringArrayInput `pulumi:"requestHeadersToRemoves"` + // Headers to add the response prior to sending the response back to the client. + // Structure is documented below. + ResponseHeadersToAdds URLMapHeaderActionResponseHeadersToAddArrayInput `pulumi:"responseHeadersToAdds"` + // A list of header names for headers that need to be removed from the response prior to sending the + // response back to the client. 
+ ResponseHeadersToRemoves pulumi.StringArrayInput `pulumi:"responseHeadersToRemoves"` +} + +func (URLMapHeaderActionArgs) ElementType() reflect.Type { + return reflect.TypeOf((*URLMapHeaderAction)(nil)).Elem() +} + +func (i URLMapHeaderActionArgs) ToURLMapHeaderActionOutput() URLMapHeaderActionOutput { + return i.ToURLMapHeaderActionOutputWithContext(context.Background()) +} + +func (i URLMapHeaderActionArgs) ToURLMapHeaderActionOutputWithContext(ctx context.Context) URLMapHeaderActionOutput { + return pulumi.ToOutputWithContext(ctx, i).(URLMapHeaderActionOutput) +} + +func (i URLMapHeaderActionArgs) ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput { + return i.ToURLMapHeaderActionPtrOutputWithContext(context.Background()) +} + +func (i URLMapHeaderActionArgs) ToURLMapHeaderActionPtrOutputWithContext(ctx context.Context) URLMapHeaderActionPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(URLMapHeaderActionOutput).ToURLMapHeaderActionPtrOutputWithContext(ctx) +} + +// URLMapHeaderActionPtrInput is an input type that accepts URLMapHeaderActionArgs, URLMapHeaderActionPtr and URLMapHeaderActionPtrOutput values. +// You can construct a concrete instance of `URLMapHeaderActionPtrInput` via: +// +// URLMapHeaderActionArgs{...} +// +// or: +// +// nil +type URLMapHeaderActionPtrInput interface { + pulumi.Input + + ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput + ToURLMapHeaderActionPtrOutputWithContext(context.Context) URLMapHeaderActionPtrOutput +} + +type urlmapHeaderActionPtrType URLMapHeaderActionArgs + +func URLMapHeaderActionPtr(v *URLMapHeaderActionArgs) URLMapHeaderActionPtrInput { + return (*urlmapHeaderActionPtrType)(v) +} + +func (*urlmapHeaderActionPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**URLMapHeaderAction)(nil)).Elem() +} + +func (i *urlmapHeaderActionPtrType) ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput { + return i.ToURLMapHeaderActionPtrOutputWithContext(context.Background()) +} + +func (i *urlmapHeaderActionPtrType) ToURLMapHeaderActionPtrOutputWithContext(ctx context.Context) URLMapHeaderActionPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(URLMapHeaderActionPtrOutput) +} + +type URLMapHeaderActionOutput struct{ *pulumi.OutputState } + +func (URLMapHeaderActionOutput) ElementType() reflect.Type { + return reflect.TypeOf((*URLMapHeaderAction)(nil)).Elem() +} + +func (o URLMapHeaderActionOutput) ToURLMapHeaderActionOutput() URLMapHeaderActionOutput { + return o +} + +func (o URLMapHeaderActionOutput) ToURLMapHeaderActionOutputWithContext(ctx context.Context) URLMapHeaderActionOutput { + return o +} + +func (o URLMapHeaderActionOutput) ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput { + return o.ToURLMapHeaderActionPtrOutputWithContext(context.Background()) +} + +func (o URLMapHeaderActionOutput) ToURLMapHeaderActionPtrOutputWithContext(ctx context.Context) URLMapHeaderActionPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v URLMapHeaderAction) *URLMapHeaderAction { + return &v + }).(URLMapHeaderActionPtrOutput) +} + +// Headers to add to a matching request prior to forwarding the request to the backendService. +// Structure is documented below. 
+func (o URLMapHeaderActionOutput) RequestHeadersToAdds() URLMapHeaderActionRequestHeadersToAddArrayOutput { + return o.ApplyT(func(v URLMapHeaderAction) []URLMapHeaderActionRequestHeadersToAdd { return v.RequestHeadersToAdds }).(URLMapHeaderActionRequestHeadersToAddArrayOutput) +} + +// A list of header names for headers that need to be removed from the request prior to +// forwarding the request to the backendService. +func (o URLMapHeaderActionOutput) RequestHeadersToRemoves() pulumi.StringArrayOutput { + return o.ApplyT(func(v URLMapHeaderAction) []string { return v.RequestHeadersToRemoves }).(pulumi.StringArrayOutput) +} + +// Headers to add the response prior to sending the response back to the client. +// Structure is documented below. +func (o URLMapHeaderActionOutput) ResponseHeadersToAdds() URLMapHeaderActionResponseHeadersToAddArrayOutput { + return o.ApplyT(func(v URLMapHeaderAction) []URLMapHeaderActionResponseHeadersToAdd { return v.ResponseHeadersToAdds }).(URLMapHeaderActionResponseHeadersToAddArrayOutput) +} + +// A list of header names for headers that need to be removed from the response prior to sending the +// response back to the client. +func (o URLMapHeaderActionOutput) ResponseHeadersToRemoves() pulumi.StringArrayOutput { + return o.ApplyT(func(v URLMapHeaderAction) []string { return v.ResponseHeadersToRemoves }).(pulumi.StringArrayOutput) +} + +type URLMapHeaderActionPtrOutput struct{ *pulumi.OutputState } + +func (URLMapHeaderActionPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**URLMapHeaderAction)(nil)).Elem() +} + +func (o URLMapHeaderActionPtrOutput) ToURLMapHeaderActionPtrOutput() URLMapHeaderActionPtrOutput { + return o +} + +func (o URLMapHeaderActionPtrOutput) ToURLMapHeaderActionPtrOutputWithContext(ctx context.Context) URLMapHeaderActionPtrOutput { + return o +} + +func (o URLMapHeaderActionPtrOutput) Elem() URLMapHeaderActionOutput { + return o.ApplyT(func(v *URLMapHeaderAction) URLMapHeaderAction { + if v != nil { + return *v + } + var ret URLMapHeaderAction + return ret + }).(URLMapHeaderActionOutput) +} + +// Headers to add to a matching request prior to forwarding the request to the backendService. +// Structure is documented below. +func (o URLMapHeaderActionPtrOutput) RequestHeadersToAdds() URLMapHeaderActionRequestHeadersToAddArrayOutput { + return o.ApplyT(func(v *URLMapHeaderAction) []URLMapHeaderActionRequestHeadersToAdd { + if v == nil { + return nil + } + return v.RequestHeadersToAdds + }).(URLMapHeaderActionRequestHeadersToAddArrayOutput) +} + +// A list of header names for headers that need to be removed from the request prior to +// forwarding the request to the backendService. +func (o URLMapHeaderActionPtrOutput) RequestHeadersToRemoves() pulumi.StringArrayOutput { + return o.ApplyT(func(v *URLMapHeaderAction) []string { + if v == nil { + return nil + } + return v.RequestHeadersToRemoves + }).(pulumi.StringArrayOutput) +} + +// Headers to add the response prior to sending the response back to the client. +// Structure is documented below. +func (o URLMapHeaderActionPtrOutput) ResponseHeadersToAdds() URLMapHeaderActionResponseHeadersToAddArrayOutput { + return o.ApplyT(func(v *URLMapHeaderAction) []URLMapHeaderActionResponseHeadersToAdd { + if v == nil { + return nil + } + return v.ResponseHeadersToAdds + }).(URLMapHeaderActionResponseHeadersToAddArrayOutput) +} + +// A list of header names for headers that need to be removed from the response prior to sending the +// response back to the client. 
+func (o URLMapHeaderActionPtrOutput) ResponseHeadersToRemoves() pulumi.StringArrayOutput { + return o.ApplyT(func(v *URLMapHeaderAction) []string { + if v == nil { + return nil + } + return v.ResponseHeadersToRemoves + }).(pulumi.StringArrayOutput) +} + type URLMapHeaderActionRequestHeadersToAdd struct { // The name of the header to add. HeaderName string `pulumi:"headerName"` @@ -20919,6 +21129,8 @@ type GetInstanceBootDisk struct { // Parameters with which a disk was created alongside the instance. // Structure is documented below. InitializeParams []GetInstanceBootDiskInitializeParam `pulumi:"initializeParams"` + // The disk interface used for attaching this disk. One of `SCSI` or `NVME`. + Interface string `pulumi:"interface"` // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. KmsKeySelfLink string `pulumi:"kmsKeySelfLink"` // Read/write mode for the disk. One of `"READ_ONLY"` or `"READ_WRITE"`. @@ -20953,6 +21165,8 @@ type GetInstanceBootDiskArgs struct { // Parameters with which a disk was created alongside the instance. // Structure is documented below. InitializeParams GetInstanceBootDiskInitializeParamArrayInput `pulumi:"initializeParams"` + // The disk interface used for attaching this disk. One of `SCSI` or `NVME`. + Interface pulumi.StringInput `pulumi:"interface"` // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. KmsKeySelfLink pulumi.StringInput `pulumi:"kmsKeySelfLink"` // Read/write mode for the disk. One of `"READ_ONLY"` or `"READ_WRITE"`. @@ -21041,6 +21255,11 @@ func (o GetInstanceBootDiskOutput) InitializeParams() GetInstanceBootDiskInitial return o.ApplyT(func(v GetInstanceBootDisk) []GetInstanceBootDiskInitializeParam { return v.InitializeParams }).(GetInstanceBootDiskInitializeParamArrayOutput) } +// The disk interface used for attaching this disk. One of `SCSI` or `NVME`. +func (o GetInstanceBootDiskOutput) Interface() pulumi.StringOutput { + return o.ApplyT(func(v GetInstanceBootDisk) string { return v.Interface }).(pulumi.StringOutput) +} + // The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. 
func (o GetInstanceBootDiskOutput) KmsKeySelfLink() pulumi.StringOutput { return o.ApplyT(func(v GetInstanceBootDisk) string { return v.KmsKeySelfLink }).(pulumi.StringOutput) @@ -40145,6 +40364,8 @@ func (o GetSubnetworksSubnetworkArrayOutput) Index(i pulumi.IntInput) GetSubnetw } func init() { + pulumi.RegisterInputType(reflect.TypeOf((*URLMapHeaderActionInput)(nil)).Elem(), URLMapHeaderActionArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*URLMapHeaderActionPtrInput)(nil)).Elem(), URLMapHeaderActionArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*URLMapHeaderActionRequestHeadersToAddInput)(nil)).Elem(), URLMapHeaderActionRequestHeadersToAddArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*URLMapHeaderActionRequestHeadersToAddArrayInput)(nil)).Elem(), URLMapHeaderActionRequestHeadersToAddArray{}) pulumi.RegisterInputType(reflect.TypeOf((*URLMapHeaderActionResponseHeadersToAddInput)(nil)).Elem(), URLMapHeaderActionResponseHeadersToAddArgs{}) @@ -40681,6 +40902,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetSubnetworkSecondaryIpRangeArrayInput)(nil)).Elem(), GetSubnetworkSecondaryIpRangeArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetSubnetworksSubnetworkInput)(nil)).Elem(), GetSubnetworksSubnetworkArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetSubnetworksSubnetworkArrayInput)(nil)).Elem(), GetSubnetworksSubnetworkArray{}) + pulumi.RegisterOutputType(URLMapHeaderActionOutput{}) + pulumi.RegisterOutputType(URLMapHeaderActionPtrOutput{}) pulumi.RegisterOutputType(URLMapHeaderActionRequestHeadersToAddOutput{}) pulumi.RegisterOutputType(URLMapHeaderActionRequestHeadersToAddArrayOutput{}) pulumi.RegisterOutputType(URLMapHeaderActionResponseHeadersToAddOutput{}) diff --git a/sdk/go/gcp/compute/targetHttpsProxy.go b/sdk/go/gcp/compute/targetHttpsProxy.go index 977bc3c073..7767c6222f 100644 --- a/sdk/go/gcp/compute/targetHttpsProxy.go +++ b/sdk/go/gcp/compute/targetHttpsProxy.go @@ -565,6 +565,10 @@ type TargetHttpsProxy struct { // INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED // loadBalancingScheme consult ServerTlsPolicy documentation. // If left blank, communications are not encrypted. + // If you remove this field from your configuration at the same time as + // deleting or recreating a referenced ServerTlsPolicy resource, you will + // receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + // within the ServerTlsPolicy resource to avoid this. ServerTlsPolicy pulumi.StringPtrOutput `pulumi:"serverTlsPolicy"` // URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. // Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. @@ -676,6 +680,10 @@ type targetHttpsProxyState struct { // INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED // loadBalancingScheme consult ServerTlsPolicy documentation. // If left blank, communications are not encrypted. + // If you remove this field from your configuration at the same time as + // deleting or recreating a referenced ServerTlsPolicy resource, you will + // receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + // within the ServerTlsPolicy resource to avoid this. ServerTlsPolicy *string `pulumi:"serverTlsPolicy"` // URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. // Currently, you may specify up to 15 SSL certificates. 
sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. @@ -755,6 +763,10 @@ type TargetHttpsProxyState struct { // INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED // loadBalancingScheme consult ServerTlsPolicy documentation. // If left blank, communications are not encrypted. + // If you remove this field from your configuration at the same time as + // deleting or recreating a referenced ServerTlsPolicy resource, you will + // receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + // within the ServerTlsPolicy resource to avoid this. ServerTlsPolicy pulumi.StringPtrInput // URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. // Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. @@ -832,6 +844,10 @@ type targetHttpsProxyArgs struct { // INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED // loadBalancingScheme consult ServerTlsPolicy documentation. // If left blank, communications are not encrypted. + // If you remove this field from your configuration at the same time as + // deleting or recreating a referenced ServerTlsPolicy resource, you will + // receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + // within the ServerTlsPolicy resource to avoid this. ServerTlsPolicy *string `pulumi:"serverTlsPolicy"` // URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. // Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. @@ -906,6 +922,10 @@ type TargetHttpsProxyArgs struct { // INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED // loadBalancingScheme consult ServerTlsPolicy documentation. // If left blank, communications are not encrypted. + // If you remove this field from your configuration at the same time as + // deleting or recreating a referenced ServerTlsPolicy resource, you will + // receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + // within the ServerTlsPolicy resource to avoid this. ServerTlsPolicy pulumi.StringPtrInput // URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. // Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. @@ -1104,6 +1124,10 @@ func (o TargetHttpsProxyOutput) SelfLink() pulumi.StringOutput { // INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED // loadBalancingScheme consult ServerTlsPolicy documentation. // If left blank, communications are not encrypted. +// If you remove this field from your configuration at the same time as +// deleting or recreating a referenced ServerTlsPolicy resource, you will +// receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy +// within the ServerTlsPolicy resource to avoid this. 
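The expanded `serverTlsPolicy` note above is phrased in Terraform terms (`lifecycle.create_before_destroy`); in the Go SDK the field itself is simply a string reference on the proxy. A hedged sketch of wiring it up, where the URL map, certificate, and policy links are placeholders for resources assumed to already exist:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Hypothetical, pre-existing resource links.
		urlMap := "projects/my-project/global/urlMaps/my-url-map"
		cert := "projects/my-project/global/sslCertificates/my-cert"
		tlsPolicy := "projects/my-project/locations/global/serverTlsPolicies/my-policy"

		_, err := compute.NewTargetHttpsProxy(ctx, "proxy", &compute.TargetHttpsProxyArgs{
			UrlMap:          pulumi.String(urlMap),
			SslCertificates: pulumi.StringArray{pulumi.String(cert)},
			// If this reference is removed while the referenced policy is being
			// deleted or recreated, the API can return resourceInUseByAnotherResource,
			// as the updated docs above warn.
			ServerTlsPolicy: pulumi.String(tlsPolicy),
		})
		return err
	})
}
```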
func (o TargetHttpsProxyOutput) ServerTlsPolicy() pulumi.StringPtrOutput { return o.ApplyT(func(v *TargetHttpsProxy) pulumi.StringPtrOutput { return v.ServerTlsPolicy }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/gcp/container/attachedCluster.go b/sdk/go/gcp/container/attachedCluster.go index 994a012c87..e943185b67 100644 --- a/sdk/go/gcp/container/attachedCluster.go +++ b/sdk/go/gcp/container/attachedCluster.go @@ -261,7 +261,7 @@ type AttachedCluster struct { ClusterRegion pulumi.StringOutput `pulumi:"clusterRegion"` // Output only. The time at which this cluster was created. CreateTime pulumi.StringOutput `pulumi:"createTime"` - // Policy to determine what flags to send on delete. + // Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS DeletionPolicy pulumi.StringPtrOutput `pulumi:"deletionPolicy"` // A human readable description of this attached cluster. Cannot be longer than 255 UTF-8 encoded bytes. Description pulumi.StringPtrOutput `pulumi:"description"` @@ -379,7 +379,7 @@ type attachedClusterState struct { ClusterRegion *string `pulumi:"clusterRegion"` // Output only. The time at which this cluster was created. CreateTime *string `pulumi:"createTime"` - // Policy to determine what flags to send on delete. + // Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS DeletionPolicy *string `pulumi:"deletionPolicy"` // A human readable description of this attached cluster. Cannot be longer than 255 UTF-8 encoded bytes. Description *string `pulumi:"description"` @@ -453,7 +453,7 @@ type AttachedClusterState struct { ClusterRegion pulumi.StringPtrInput // Output only. The time at which this cluster was created. CreateTime pulumi.StringPtrInput - // Policy to determine what flags to send on delete. + // Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS DeletionPolicy pulumi.StringPtrInput // A human readable description of this attached cluster. Cannot be longer than 255 UTF-8 encoded bytes. Description pulumi.StringPtrInput @@ -525,7 +525,7 @@ type attachedClusterArgs struct { Authorization *AttachedClusterAuthorization `pulumi:"authorization"` // Binary Authorization configuration. BinaryAuthorization *AttachedClusterBinaryAuthorization `pulumi:"binaryAuthorization"` - // Policy to determine what flags to send on delete. + // Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS DeletionPolicy *string `pulumi:"deletionPolicy"` // A human readable description of this attached cluster. Cannot be longer than 255 UTF-8 encoded bytes. Description *string `pulumi:"description"` @@ -575,7 +575,7 @@ type AttachedClusterArgs struct { Authorization AttachedClusterAuthorizationPtrInput // Binary Authorization configuration. BinaryAuthorization AttachedClusterBinaryAuthorizationPtrInput - // Policy to determine what flags to send on delete. + // Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS DeletionPolicy pulumi.StringPtrInput // A human readable description of this attached cluster. Cannot be longer than 255 UTF-8 encoded bytes. Description pulumi.StringPtrInput @@ -731,7 +731,7 @@ func (o AttachedClusterOutput) CreateTime() pulumi.StringOutput { return o.ApplyT(func(v *AttachedCluster) pulumi.StringOutput { return v.CreateTime }).(pulumi.StringOutput) } -// Policy to determine what flags to send on delete. +// Policy to determine what flags to send on delete. 
Possible values: DELETE, DELETE_IGNORE_ERRORS func (o AttachedClusterOutput) DeletionPolicy() pulumi.StringPtrOutput { return o.ApplyT(func(v *AttachedCluster) pulumi.StringPtrOutput { return v.DeletionPolicy }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/gcp/container/pulumiTypes.go b/sdk/go/gcp/container/pulumiTypes.go index 061ca9db25..e0329c0a1f 100644 --- a/sdk/go/gcp/container/pulumiTypes.go +++ b/sdk/go/gcp/container/pulumiTypes.go @@ -22974,6 +22974,8 @@ type ClusterNodeConfigKubeletConfig struct { // [K8S CPU Management Policies](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/). // One of `"none"` or `"static"`. Defaults to `none` when `kubeletConfig` is unset. CpuManagerPolicy string `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled *string `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. PodPidsLimit *int `pulumi:"podPidsLimit"` } @@ -23007,6 +23009,8 @@ type ClusterNodeConfigKubeletConfigArgs struct { // [K8S CPU Management Policies](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/). // One of `"none"` or `"static"`. Defaults to `none` when `kubeletConfig` is unset. CpuManagerPolicy pulumi.StringInput `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled pulumi.StringPtrInput `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. PodPidsLimit pulumi.IntPtrInput `pulumi:"podPidsLimit"` } @@ -23114,6 +23118,11 @@ func (o ClusterNodeConfigKubeletConfigOutput) CpuManagerPolicy() pulumi.StringOu return o.ApplyT(func(v ClusterNodeConfigKubeletConfig) string { return v.CpuManagerPolicy }).(pulumi.StringOutput) } +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o ClusterNodeConfigKubeletConfigOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterNodeConfigKubeletConfig) *string { return v.InsecureKubeletReadonlyPortEnabled }).(pulumi.StringPtrOutput) +} + // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. func (o ClusterNodeConfigKubeletConfigOutput) PodPidsLimit() pulumi.IntPtrOutput { return o.ApplyT(func(v ClusterNodeConfigKubeletConfig) *int { return v.PodPidsLimit }).(pulumi.IntPtrOutput) @@ -23184,6 +23193,16 @@ func (o ClusterNodeConfigKubeletConfigPtrOutput) CpuManagerPolicy() pulumi.Strin }).(pulumi.StringPtrOutput) } +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
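To exercise the new `insecureKubeletReadonlyPortEnabled` kubelet setting shown above on a Standard cluster's node config, a minimal sketch using the field names from this diff; the cluster location, node count, and `cpuManagerPolicy` value are illustrative assumptions:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := container.NewCluster(ctx, "primary", &container.ClusterArgs{
			Location:         pulumi.String("us-central1"),
			InitialNodeCount: pulumi.Int(1),
			NodeConfig: &container.ClusterNodeConfigArgs{
				KubeletConfig: &container.ClusterNodeConfigKubeletConfigArgs{
					CpuManagerPolicy: pulumi.String("none"),
					// Disable the insecure kubelet read-only port, as recommended.
					InsecureKubeletReadonlyPortEnabled: pulumi.String("FALSE"),
				},
			},
		})
		return err
	})
}
```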
+func (o ClusterNodeConfigKubeletConfigPtrOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ClusterNodeConfigKubeletConfig) *string { + if v == nil { + return nil + } + return v.InsecureKubeletReadonlyPortEnabled + }).(pulumi.StringPtrOutput) +} + // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. func (o ClusterNodeConfigKubeletConfigPtrOutput) PodPidsLimit() pulumi.IntPtrOutput { return o.ApplyT(func(v *ClusterNodeConfigKubeletConfig) *int { @@ -24966,8 +24985,11 @@ func (o ClusterNodePoolArrayOutput) Index(i pulumi.IntInput) ClusterNodePoolOutp } type ClusterNodePoolAutoConfig struct { - // The network tag config for the cluster's automatically provisioned node pools. + // The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. NetworkTags *ClusterNodePoolAutoConfigNetworkTags `pulumi:"networkTags"` + // Kubelet configuration for Autopilot clusters. Currently, only `insecureKubeletReadonlyPortEnabled` is supported here. + // Structure is documented below. + NodeKubeletConfig *ClusterNodePoolAutoConfigNodeKubeletConfig `pulumi:"nodeKubeletConfig"` // A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. ResourceManagerTags map[string]string `pulumi:"resourceManagerTags"` } @@ -24984,8 +25006,11 @@ type ClusterNodePoolAutoConfigInput interface { } type ClusterNodePoolAutoConfigArgs struct { - // The network tag config for the cluster's automatically provisioned node pools. + // The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. NetworkTags ClusterNodePoolAutoConfigNetworkTagsPtrInput `pulumi:"networkTags"` + // Kubelet configuration for Autopilot clusters. Currently, only `insecureKubeletReadonlyPortEnabled` is supported here. + // Structure is documented below. + NodeKubeletConfig ClusterNodePoolAutoConfigNodeKubeletConfigPtrInput `pulumi:"nodeKubeletConfig"` // A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. ResourceManagerTags pulumi.StringMapInput `pulumi:"resourceManagerTags"` } @@ -25067,11 +25092,19 @@ func (o ClusterNodePoolAutoConfigOutput) ToClusterNodePoolAutoConfigPtrOutputWit }).(ClusterNodePoolAutoConfigPtrOutput) } -// The network tag config for the cluster's automatically provisioned node pools. +// The network tag config for the cluster's automatically provisioned node pools. 
Structure is documented below. func (o ClusterNodePoolAutoConfigOutput) NetworkTags() ClusterNodePoolAutoConfigNetworkTagsPtrOutput { return o.ApplyT(func(v ClusterNodePoolAutoConfig) *ClusterNodePoolAutoConfigNetworkTags { return v.NetworkTags }).(ClusterNodePoolAutoConfigNetworkTagsPtrOutput) } +// Kubelet configuration for Autopilot clusters. Currently, only `insecureKubeletReadonlyPortEnabled` is supported here. +// Structure is documented below. +func (o ClusterNodePoolAutoConfigOutput) NodeKubeletConfig() ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return o.ApplyT(func(v ClusterNodePoolAutoConfig) *ClusterNodePoolAutoConfigNodeKubeletConfig { + return v.NodeKubeletConfig + }).(ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput) +} + // A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. func (o ClusterNodePoolAutoConfigOutput) ResourceManagerTags() pulumi.StringMapOutput { return o.ApplyT(func(v ClusterNodePoolAutoConfig) map[string]string { return v.ResourceManagerTags }).(pulumi.StringMapOutput) @@ -25101,7 +25134,7 @@ func (o ClusterNodePoolAutoConfigPtrOutput) Elem() ClusterNodePoolAutoConfigOutp }).(ClusterNodePoolAutoConfigOutput) } -// The network tag config for the cluster's automatically provisioned node pools. +// The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. func (o ClusterNodePoolAutoConfigPtrOutput) NetworkTags() ClusterNodePoolAutoConfigNetworkTagsPtrOutput { return o.ApplyT(func(v *ClusterNodePoolAutoConfig) *ClusterNodePoolAutoConfigNetworkTags { if v == nil { @@ -25111,6 +25144,17 @@ func (o ClusterNodePoolAutoConfigPtrOutput) NetworkTags() ClusterNodePoolAutoCon }).(ClusterNodePoolAutoConfigNetworkTagsPtrOutput) } +// Kubelet configuration for Autopilot clusters. Currently, only `insecureKubeletReadonlyPortEnabled` is supported here. +// Structure is documented below. +func (o ClusterNodePoolAutoConfigPtrOutput) NodeKubeletConfig() ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return o.ApplyT(func(v *ClusterNodePoolAutoConfig) *ClusterNodePoolAutoConfigNodeKubeletConfig { + if v == nil { + return nil + } + return v.NodeKubeletConfig + }).(ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput) +} + // A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. 
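For the new `nodeKubeletConfig` block on `ClusterNodePoolAutoConfig` (Autopilot clusters), a minimal sketch under the assumption that the remaining cluster arguments stay at their defaults:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := container.NewCluster(ctx, "autopilot", &container.ClusterArgs{
			Location:        pulumi.String("us-central1"),
			EnableAutopilot: pulumi.Bool(true),
			NodePoolAutoConfig: &container.ClusterNodePoolAutoConfigArgs{
				// Per the docs above, only insecureKubeletReadonlyPortEnabled
				// is supported in this block today.
				NodeKubeletConfig: &container.ClusterNodePoolAutoConfigNodeKubeletConfigArgs{
					InsecureKubeletReadonlyPortEnabled: pulumi.String("FALSE"),
				},
			},
		})
		return err
	})
}
```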
func (o ClusterNodePoolAutoConfigPtrOutput) ResourceManagerTags() pulumi.StringMapOutput { return o.ApplyT(func(v *ClusterNodePoolAutoConfig) map[string]string { @@ -25258,6 +25302,145 @@ func (o ClusterNodePoolAutoConfigNetworkTagsPtrOutput) Tags() pulumi.StringArray }).(pulumi.StringArrayOutput) } +type ClusterNodePoolAutoConfigNodeKubeletConfig struct { + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled *string `pulumi:"insecureKubeletReadonlyPortEnabled"` +} + +// ClusterNodePoolAutoConfigNodeKubeletConfigInput is an input type that accepts ClusterNodePoolAutoConfigNodeKubeletConfigArgs and ClusterNodePoolAutoConfigNodeKubeletConfigOutput values. +// You can construct a concrete instance of `ClusterNodePoolAutoConfigNodeKubeletConfigInput` via: +// +// ClusterNodePoolAutoConfigNodeKubeletConfigArgs{...} +type ClusterNodePoolAutoConfigNodeKubeletConfigInput interface { + pulumi.Input + + ToClusterNodePoolAutoConfigNodeKubeletConfigOutput() ClusterNodePoolAutoConfigNodeKubeletConfigOutput + ToClusterNodePoolAutoConfigNodeKubeletConfigOutputWithContext(context.Context) ClusterNodePoolAutoConfigNodeKubeletConfigOutput +} + +type ClusterNodePoolAutoConfigNodeKubeletConfigArgs struct { + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled pulumi.StringPtrInput `pulumi:"insecureKubeletReadonlyPortEnabled"` +} + +func (ClusterNodePoolAutoConfigNodeKubeletConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterNodePoolAutoConfigNodeKubeletConfig)(nil)).Elem() +} + +func (i ClusterNodePoolAutoConfigNodeKubeletConfigArgs) ToClusterNodePoolAutoConfigNodeKubeletConfigOutput() ClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return i.ToClusterNodePoolAutoConfigNodeKubeletConfigOutputWithContext(context.Background()) +} + +func (i ClusterNodePoolAutoConfigNodeKubeletConfigArgs) ToClusterNodePoolAutoConfigNodeKubeletConfigOutputWithContext(ctx context.Context) ClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterNodePoolAutoConfigNodeKubeletConfigOutput) +} + +func (i ClusterNodePoolAutoConfigNodeKubeletConfigArgs) ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput() ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return i.ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutputWithContext(context.Background()) +} + +func (i ClusterNodePoolAutoConfigNodeKubeletConfigArgs) ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutputWithContext(ctx context.Context) ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterNodePoolAutoConfigNodeKubeletConfigOutput).ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutputWithContext(ctx) +} + +// ClusterNodePoolAutoConfigNodeKubeletConfigPtrInput is an input type that accepts ClusterNodePoolAutoConfigNodeKubeletConfigArgs, ClusterNodePoolAutoConfigNodeKubeletConfigPtr and ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput values. 
+// You can construct a concrete instance of `ClusterNodePoolAutoConfigNodeKubeletConfigPtrInput` via: +// +// ClusterNodePoolAutoConfigNodeKubeletConfigArgs{...} +// +// or: +// +// nil +type ClusterNodePoolAutoConfigNodeKubeletConfigPtrInput interface { + pulumi.Input + + ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput() ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput + ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutputWithContext(context.Context) ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput +} + +type clusterNodePoolAutoConfigNodeKubeletConfigPtrType ClusterNodePoolAutoConfigNodeKubeletConfigArgs + +func ClusterNodePoolAutoConfigNodeKubeletConfigPtr(v *ClusterNodePoolAutoConfigNodeKubeletConfigArgs) ClusterNodePoolAutoConfigNodeKubeletConfigPtrInput { + return (*clusterNodePoolAutoConfigNodeKubeletConfigPtrType)(v) +} + +func (*clusterNodePoolAutoConfigNodeKubeletConfigPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**ClusterNodePoolAutoConfigNodeKubeletConfig)(nil)).Elem() +} + +func (i *clusterNodePoolAutoConfigNodeKubeletConfigPtrType) ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput() ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return i.ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutputWithContext(context.Background()) +} + +func (i *clusterNodePoolAutoConfigNodeKubeletConfigPtrType) ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutputWithContext(ctx context.Context) ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput) +} + +type ClusterNodePoolAutoConfigNodeKubeletConfigOutput struct{ *pulumi.OutputState } + +func (ClusterNodePoolAutoConfigNodeKubeletConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterNodePoolAutoConfigNodeKubeletConfig)(nil)).Elem() +} + +func (o ClusterNodePoolAutoConfigNodeKubeletConfigOutput) ToClusterNodePoolAutoConfigNodeKubeletConfigOutput() ClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return o +} + +func (o ClusterNodePoolAutoConfigNodeKubeletConfigOutput) ToClusterNodePoolAutoConfigNodeKubeletConfigOutputWithContext(ctx context.Context) ClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return o +} + +func (o ClusterNodePoolAutoConfigNodeKubeletConfigOutput) ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput() ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return o.ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutputWithContext(context.Background()) +} + +func (o ClusterNodePoolAutoConfigNodeKubeletConfigOutput) ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutputWithContext(ctx context.Context) ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v ClusterNodePoolAutoConfigNodeKubeletConfig) *ClusterNodePoolAutoConfigNodeKubeletConfig { + return &v + }).(ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput) +} + +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+func (o ClusterNodePoolAutoConfigNodeKubeletConfigOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterNodePoolAutoConfigNodeKubeletConfig) *string { + return v.InsecureKubeletReadonlyPortEnabled + }).(pulumi.StringPtrOutput) +} + +type ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput struct{ *pulumi.OutputState } + +func (ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ClusterNodePoolAutoConfigNodeKubeletConfig)(nil)).Elem() +} + +func (o ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput) ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput() ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return o +} + +func (o ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput) ToClusterNodePoolAutoConfigNodeKubeletConfigPtrOutputWithContext(ctx context.Context) ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput { + return o +} + +func (o ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput) Elem() ClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return o.ApplyT(func(v *ClusterNodePoolAutoConfigNodeKubeletConfig) ClusterNodePoolAutoConfigNodeKubeletConfig { + if v != nil { + return *v + } + var ret ClusterNodePoolAutoConfigNodeKubeletConfig + return ret + }).(ClusterNodePoolAutoConfigNodeKubeletConfigOutput) +} + +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ClusterNodePoolAutoConfigNodeKubeletConfig) *string { + if v == nil { + return nil + } + return v.InsecureKubeletReadonlyPortEnabled + }).(pulumi.StringPtrOutput) +} + type ClusterNodePoolAutoscaling struct { // Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs. LocationPolicy *string `pulumi:"locationPolicy"` @@ -25615,6 +25798,8 @@ type ClusterNodePoolDefaultsNodeConfigDefaults struct { ContainerdConfig *ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfig `pulumi:"containerdConfig"` // The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below. GcfsConfig *ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig `pulumi:"gcfsConfig"` + // Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled *string `pulumi:"insecureKubeletReadonlyPortEnabled"` // The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. 
LoggingVariant *string `pulumi:"loggingVariant"` } @@ -25635,6 +25820,8 @@ type ClusterNodePoolDefaultsNodeConfigDefaultsArgs struct { ContainerdConfig ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigPtrInput `pulumi:"containerdConfig"` // The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below. GcfsConfig ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigPtrInput `pulumi:"gcfsConfig"` + // Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled pulumi.StringPtrInput `pulumi:"insecureKubeletReadonlyPortEnabled"` // The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. LoggingVariant pulumi.StringPtrInput `pulumi:"loggingVariant"` } @@ -25730,6 +25917,11 @@ func (o ClusterNodePoolDefaultsNodeConfigDefaultsOutput) GcfsConfig() ClusterNod }).(ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigPtrOutput) } +// Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o ClusterNodePoolDefaultsNodeConfigDefaultsOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterNodePoolDefaultsNodeConfigDefaults) *string { return v.InsecureKubeletReadonlyPortEnabled }).(pulumi.StringPtrOutput) +} + // The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. func (o ClusterNodePoolDefaultsNodeConfigDefaultsOutput) LoggingVariant() pulumi.StringPtrOutput { return o.ApplyT(func(v ClusterNodePoolDefaultsNodeConfigDefaults) *string { return v.LoggingVariant }).(pulumi.StringPtrOutput) @@ -25779,6 +25971,16 @@ func (o ClusterNodePoolDefaultsNodeConfigDefaultsPtrOutput) GcfsConfig() Cluster }).(ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigPtrOutput) } +// Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o ClusterNodePoolDefaultsNodeConfigDefaultsPtrOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ClusterNodePoolDefaultsNodeConfigDefaults) *string { + if v == nil { + return nil + } + return v.InsecureKubeletReadonlyPortEnabled + }).(pulumi.StringPtrOutput) +} + // The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. 
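The same toggle also lands on the cluster-wide node config defaults (`nodePoolDefaults.nodeConfigDefaults`) shown in this hunk, so newly created node pools inherit it. A hedged sketch, assuming the rest of the cluster configuration is handled elsewhere:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := container.NewCluster(ctx, "with-defaults", &container.ClusterArgs{
			Location:         pulumi.String("us-central1"),
			InitialNodeCount: pulumi.Int(1),
			NodePoolDefaults: &container.ClusterNodePoolDefaultsArgs{
				NodeConfigDefaults: &container.ClusterNodePoolDefaultsNodeConfigDefaultsArgs{
					// Applies to node pools created after this default is set.
					InsecureKubeletReadonlyPortEnabled: pulumi.String("FALSE"),
				},
			},
		})
		return err
	})
}
```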
func (o ClusterNodePoolDefaultsNodeConfigDefaultsPtrOutput) LoggingVariant() pulumi.StringPtrOutput { return o.ApplyT(func(v *ClusterNodePoolDefaultsNodeConfigDefaults) *string { @@ -30623,6 +30825,8 @@ type ClusterNodePoolNodeConfigKubeletConfig struct { // [K8S CPU Management Policies](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/). // One of `"none"` or `"static"`. Defaults to `none` when `kubeletConfig` is unset. CpuManagerPolicy string `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled *string `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. PodPidsLimit *int `pulumi:"podPidsLimit"` } @@ -30656,6 +30860,8 @@ type ClusterNodePoolNodeConfigKubeletConfigArgs struct { // [K8S CPU Management Policies](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/). // One of `"none"` or `"static"`. Defaults to `none` when `kubeletConfig` is unset. CpuManagerPolicy pulumi.StringInput `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled pulumi.StringPtrInput `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. PodPidsLimit pulumi.IntPtrInput `pulumi:"podPidsLimit"` } @@ -30763,6 +30969,11 @@ func (o ClusterNodePoolNodeConfigKubeletConfigOutput) CpuManagerPolicy() pulumi. return o.ApplyT(func(v ClusterNodePoolNodeConfigKubeletConfig) string { return v.CpuManagerPolicy }).(pulumi.StringOutput) } +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o ClusterNodePoolNodeConfigKubeletConfigOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterNodePoolNodeConfigKubeletConfig) *string { return v.InsecureKubeletReadonlyPortEnabled }).(pulumi.StringPtrOutput) +} + // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. func (o ClusterNodePoolNodeConfigKubeletConfigOutput) PodPidsLimit() pulumi.IntPtrOutput { return o.ApplyT(func(v ClusterNodePoolNodeConfigKubeletConfig) *int { return v.PodPidsLimit }).(pulumi.IntPtrOutput) @@ -30833,6 +31044,16 @@ func (o ClusterNodePoolNodeConfigKubeletConfigPtrOutput) CpuManagerPolicy() pulu }).(pulumi.StringPtrOutput) } +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o ClusterNodePoolNodeConfigKubeletConfigPtrOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ClusterNodePoolNodeConfigKubeletConfig) *string { + if v == nil { + return nil + } + return v.InsecureKubeletReadonlyPortEnabled + }).(pulumi.StringPtrOutput) +} + // Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. 
func (o ClusterNodePoolNodeConfigKubeletConfigPtrOutput) PodPidsLimit() pulumi.IntPtrOutput { return o.ApplyT(func(v *ClusterNodePoolNodeConfigKubeletConfig) *int { @@ -40270,6 +40491,8 @@ type NodePoolNodeConfigKubeletConfig struct { CpuCfsQuotaPeriod *string `pulumi:"cpuCfsQuotaPeriod"` // Control the CPU management policy on the node. CpuManagerPolicy string `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled *string `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. PodPidsLimit *int `pulumi:"podPidsLimit"` } @@ -40292,6 +40515,8 @@ type NodePoolNodeConfigKubeletConfigArgs struct { CpuCfsQuotaPeriod pulumi.StringPtrInput `pulumi:"cpuCfsQuotaPeriod"` // Control the CPU management policy on the node. CpuManagerPolicy pulumi.StringInput `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled pulumi.StringPtrInput `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. PodPidsLimit pulumi.IntPtrInput `pulumi:"podPidsLimit"` } @@ -40388,6 +40613,11 @@ func (o NodePoolNodeConfigKubeletConfigOutput) CpuManagerPolicy() pulumi.StringO return o.ApplyT(func(v NodePoolNodeConfigKubeletConfig) string { return v.CpuManagerPolicy }).(pulumi.StringOutput) } +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o NodePoolNodeConfigKubeletConfigOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v NodePoolNodeConfigKubeletConfig) *string { return v.InsecureKubeletReadonlyPortEnabled }).(pulumi.StringPtrOutput) +} + // Controls the maximum number of processes allowed to run in a pod. func (o NodePoolNodeConfigKubeletConfigOutput) PodPidsLimit() pulumi.IntPtrOutput { return o.ApplyT(func(v NodePoolNodeConfigKubeletConfig) *int { return v.PodPidsLimit }).(pulumi.IntPtrOutput) @@ -40447,6 +40677,16 @@ func (o NodePoolNodeConfigKubeletConfigPtrOutput) CpuManagerPolicy() pulumi.Stri }).(pulumi.StringPtrOutput) } +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o NodePoolNodeConfigKubeletConfigPtrOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringPtrOutput { + return o.ApplyT(func(v *NodePoolNodeConfigKubeletConfig) *string { + if v == nil { + return nil + } + return v.InsecureKubeletReadonlyPortEnabled + }).(pulumi.StringPtrOutput) +} + // Controls the maximum number of processes allowed to run in a pod. func (o NodePoolNodeConfigKubeletConfigPtrOutput) PodPidsLimit() pulumi.IntPtrOutput { return o.ApplyT(func(v *NodePoolNodeConfigKubeletConfig) *int { @@ -50980,6 +51220,8 @@ type GetClusterNodeConfigKubeletConfig struct { CpuCfsQuotaPeriod string `pulumi:"cpuCfsQuotaPeriod"` // Control the CPU management policy on the node. CpuManagerPolicy string `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ InsecureKubeletReadonlyPortEnabled string `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. PodPidsLimit int `pulumi:"podPidsLimit"` } @@ -51002,6 +51244,8 @@ type GetClusterNodeConfigKubeletConfigArgs struct { CpuCfsQuotaPeriod pulumi.StringInput `pulumi:"cpuCfsQuotaPeriod"` // Control the CPU management policy on the node. CpuManagerPolicy pulumi.StringInput `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled pulumi.StringInput `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. PodPidsLimit pulumi.IntInput `pulumi:"podPidsLimit"` } @@ -51072,6 +51316,11 @@ func (o GetClusterNodeConfigKubeletConfigOutput) CpuManagerPolicy() pulumi.Strin return o.ApplyT(func(v GetClusterNodeConfigKubeletConfig) string { return v.CpuManagerPolicy }).(pulumi.StringOutput) } +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o GetClusterNodeConfigKubeletConfigOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringOutput { + return o.ApplyT(func(v GetClusterNodeConfigKubeletConfig) string { return v.InsecureKubeletReadonlyPortEnabled }).(pulumi.StringOutput) +} + // Controls the maximum number of processes allowed to run in a pod. func (o GetClusterNodeConfigKubeletConfigOutput) PodPidsLimit() pulumi.IntOutput { return o.ApplyT(func(v GetClusterNodeConfigKubeletConfig) int { return v.PodPidsLimit }).(pulumi.IntOutput) @@ -52382,6 +52631,8 @@ func (o GetClusterNodePoolArrayOutput) Index(i pulumi.IntInput) GetClusterNodePo type GetClusterNodePoolAutoConfig struct { // Collection of Compute Engine network tags that can be applied to a node's underlying VM instance. NetworkTags []GetClusterNodePoolAutoConfigNetworkTag `pulumi:"networkTags"` + // Node kubelet configs. + NodeKubeletConfigs []GetClusterNodePoolAutoConfigNodeKubeletConfig `pulumi:"nodeKubeletConfigs"` // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. ResourceManagerTags map[string]string `pulumi:"resourceManagerTags"` } @@ -52400,6 +52651,8 @@ type GetClusterNodePoolAutoConfigInput interface { type GetClusterNodePoolAutoConfigArgs struct { // Collection of Compute Engine network tags that can be applied to a node's underlying VM instance. NetworkTags GetClusterNodePoolAutoConfigNetworkTagArrayInput `pulumi:"networkTags"` + // Node kubelet configs. + NodeKubeletConfigs GetClusterNodePoolAutoConfigNodeKubeletConfigArrayInput `pulumi:"nodeKubeletConfigs"` // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. 
ResourceManagerTags pulumi.StringMapInput `pulumi:"resourceManagerTags"` } @@ -52460,6 +52713,13 @@ func (o GetClusterNodePoolAutoConfigOutput) NetworkTags() GetClusterNodePoolAuto return o.ApplyT(func(v GetClusterNodePoolAutoConfig) []GetClusterNodePoolAutoConfigNetworkTag { return v.NetworkTags }).(GetClusterNodePoolAutoConfigNetworkTagArrayOutput) } +// Node kubelet configs. +func (o GetClusterNodePoolAutoConfigOutput) NodeKubeletConfigs() GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput { + return o.ApplyT(func(v GetClusterNodePoolAutoConfig) []GetClusterNodePoolAutoConfigNodeKubeletConfig { + return v.NodeKubeletConfigs + }).(GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput) +} + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. func (o GetClusterNodePoolAutoConfigOutput) ResourceManagerTags() pulumi.StringMapOutput { return o.ApplyT(func(v GetClusterNodePoolAutoConfig) map[string]string { return v.ResourceManagerTags }).(pulumi.StringMapOutput) @@ -52582,6 +52842,105 @@ func (o GetClusterNodePoolAutoConfigNetworkTagArrayOutput) Index(i pulumi.IntInp }).(GetClusterNodePoolAutoConfigNetworkTagOutput) } +type GetClusterNodePoolAutoConfigNodeKubeletConfig struct { + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled string `pulumi:"insecureKubeletReadonlyPortEnabled"` +} + +// GetClusterNodePoolAutoConfigNodeKubeletConfigInput is an input type that accepts GetClusterNodePoolAutoConfigNodeKubeletConfigArgs and GetClusterNodePoolAutoConfigNodeKubeletConfigOutput values. +// You can construct a concrete instance of `GetClusterNodePoolAutoConfigNodeKubeletConfigInput` via: +// +// GetClusterNodePoolAutoConfigNodeKubeletConfigArgs{...} +type GetClusterNodePoolAutoConfigNodeKubeletConfigInput interface { + pulumi.Input + + ToGetClusterNodePoolAutoConfigNodeKubeletConfigOutput() GetClusterNodePoolAutoConfigNodeKubeletConfigOutput + ToGetClusterNodePoolAutoConfigNodeKubeletConfigOutputWithContext(context.Context) GetClusterNodePoolAutoConfigNodeKubeletConfigOutput +} + +type GetClusterNodePoolAutoConfigNodeKubeletConfigArgs struct { + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ InsecureKubeletReadonlyPortEnabled pulumi.StringInput `pulumi:"insecureKubeletReadonlyPortEnabled"` +} + +func (GetClusterNodePoolAutoConfigNodeKubeletConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetClusterNodePoolAutoConfigNodeKubeletConfig)(nil)).Elem() +} + +func (i GetClusterNodePoolAutoConfigNodeKubeletConfigArgs) ToGetClusterNodePoolAutoConfigNodeKubeletConfigOutput() GetClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return i.ToGetClusterNodePoolAutoConfigNodeKubeletConfigOutputWithContext(context.Background()) +} + +func (i GetClusterNodePoolAutoConfigNodeKubeletConfigArgs) ToGetClusterNodePoolAutoConfigNodeKubeletConfigOutputWithContext(ctx context.Context) GetClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetClusterNodePoolAutoConfigNodeKubeletConfigOutput) +} + +// GetClusterNodePoolAutoConfigNodeKubeletConfigArrayInput is an input type that accepts GetClusterNodePoolAutoConfigNodeKubeletConfigArray and GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput values. +// You can construct a concrete instance of `GetClusterNodePoolAutoConfigNodeKubeletConfigArrayInput` via: +// +// GetClusterNodePoolAutoConfigNodeKubeletConfigArray{ GetClusterNodePoolAutoConfigNodeKubeletConfigArgs{...} } +type GetClusterNodePoolAutoConfigNodeKubeletConfigArrayInput interface { + pulumi.Input + + ToGetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput() GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput + ToGetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutputWithContext(context.Context) GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput +} + +type GetClusterNodePoolAutoConfigNodeKubeletConfigArray []GetClusterNodePoolAutoConfigNodeKubeletConfigInput + +func (GetClusterNodePoolAutoConfigNodeKubeletConfigArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetClusterNodePoolAutoConfigNodeKubeletConfig)(nil)).Elem() +} + +func (i GetClusterNodePoolAutoConfigNodeKubeletConfigArray) ToGetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput() GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput { + return i.ToGetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutputWithContext(context.Background()) +} + +func (i GetClusterNodePoolAutoConfigNodeKubeletConfigArray) ToGetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutputWithContext(ctx context.Context) GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput) +} + +type GetClusterNodePoolAutoConfigNodeKubeletConfigOutput struct{ *pulumi.OutputState } + +func (GetClusterNodePoolAutoConfigNodeKubeletConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetClusterNodePoolAutoConfigNodeKubeletConfig)(nil)).Elem() +} + +func (o GetClusterNodePoolAutoConfigNodeKubeletConfigOutput) ToGetClusterNodePoolAutoConfigNodeKubeletConfigOutput() GetClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return o +} + +func (o GetClusterNodePoolAutoConfigNodeKubeletConfigOutput) ToGetClusterNodePoolAutoConfigNodeKubeletConfigOutputWithContext(ctx context.Context) GetClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return o +} + +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+func (o GetClusterNodePoolAutoConfigNodeKubeletConfigOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringOutput { + return o.ApplyT(func(v GetClusterNodePoolAutoConfigNodeKubeletConfig) string { + return v.InsecureKubeletReadonlyPortEnabled + }).(pulumi.StringOutput) +} + +type GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput struct{ *pulumi.OutputState } + +func (GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetClusterNodePoolAutoConfigNodeKubeletConfig)(nil)).Elem() +} + +func (o GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput) ToGetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput() GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput { + return o +} + +func (o GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput) ToGetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutputWithContext(ctx context.Context) GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput { + return o +} + +func (o GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput) Index(i pulumi.IntInput) GetClusterNodePoolAutoConfigNodeKubeletConfigOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetClusterNodePoolAutoConfigNodeKubeletConfig { + return vs[0].([]GetClusterNodePoolAutoConfigNodeKubeletConfig)[vs[1].(int)] + }).(GetClusterNodePoolAutoConfigNodeKubeletConfigOutput) +} + type GetClusterNodePoolAutoscaling struct { // Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs. LocationPolicy string `pulumi:"locationPolicy"` @@ -52819,6 +53178,8 @@ type GetClusterNodePoolDefaultNodeConfigDefault struct { ContainerdConfigs []GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfig `pulumi:"containerdConfigs"` // GCFS configuration for this node. GcfsConfigs []GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfig `pulumi:"gcfsConfigs"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled string `pulumi:"insecureKubeletReadonlyPortEnabled"` // Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. LoggingVariant string `pulumi:"loggingVariant"` } @@ -52839,6 +53200,8 @@ type GetClusterNodePoolDefaultNodeConfigDefaultArgs struct { ContainerdConfigs GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigArrayInput `pulumi:"containerdConfigs"` // GCFS configuration for this node. GcfsConfigs GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfigArrayInput `pulumi:"gcfsConfigs"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled pulumi.StringInput `pulumi:"insecureKubeletReadonlyPortEnabled"` // Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. LoggingVariant pulumi.StringInput `pulumi:"loggingVariant"` } @@ -52908,6 +53271,11 @@ func (o GetClusterNodePoolDefaultNodeConfigDefaultOutput) GcfsConfigs() GetClust }).(GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfigArrayOutput) } +// Controls whether the kubelet read-only port is enabled. 
It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o GetClusterNodePoolDefaultNodeConfigDefaultOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringOutput { + return o.ApplyT(func(v GetClusterNodePoolDefaultNodeConfigDefault) string { return v.InsecureKubeletReadonlyPortEnabled }).(pulumi.StringOutput) +} + // Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. func (o GetClusterNodePoolDefaultNodeConfigDefaultOutput) LoggingVariant() pulumi.StringOutput { return o.ApplyT(func(v GetClusterNodePoolDefaultNodeConfigDefault) string { return v.LoggingVariant }).(pulumi.StringOutput) @@ -56287,6 +56655,8 @@ type GetClusterNodePoolNodeConfigKubeletConfig struct { CpuCfsQuotaPeriod string `pulumi:"cpuCfsQuotaPeriod"` // Control the CPU management policy on the node. CpuManagerPolicy string `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled string `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. PodPidsLimit int `pulumi:"podPidsLimit"` } @@ -56309,6 +56679,8 @@ type GetClusterNodePoolNodeConfigKubeletConfigArgs struct { CpuCfsQuotaPeriod pulumi.StringInput `pulumi:"cpuCfsQuotaPeriod"` // Control the CPU management policy on the node. CpuManagerPolicy pulumi.StringInput `pulumi:"cpuManagerPolicy"` + // Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + InsecureKubeletReadonlyPortEnabled pulumi.StringInput `pulumi:"insecureKubeletReadonlyPortEnabled"` // Controls the maximum number of processes allowed to run in a pod. PodPidsLimit pulumi.IntInput `pulumi:"podPidsLimit"` } @@ -56379,6 +56751,11 @@ func (o GetClusterNodePoolNodeConfigKubeletConfigOutput) CpuManagerPolicy() pulu return o.ApplyT(func(v GetClusterNodePoolNodeConfigKubeletConfig) string { return v.CpuManagerPolicy }).(pulumi.StringOutput) } +// Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. +func (o GetClusterNodePoolNodeConfigKubeletConfigOutput) InsecureKubeletReadonlyPortEnabled() pulumi.StringOutput { + return o.ApplyT(func(v GetClusterNodePoolNodeConfigKubeletConfig) string { return v.InsecureKubeletReadonlyPortEnabled }).(pulumi.StringOutput) +} + // Controls the maximum number of processes allowed to run in a pod. 
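The standalone `container.NodePool` resource gains the same kubelet knob via `NodePoolNodeConfigKubeletConfig` (earlier in this hunk). Roughly, under the assumption that a cluster named `primary-cluster` already exists in the same location:

```go
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/container"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Assumes a cluster named "primary-cluster" already exists in us-central1.
		_, err := container.NewNodePool(ctx, "hardened-pool", &container.NodePoolArgs{
			Cluster:   pulumi.String("primary-cluster"),
			Location:  pulumi.String("us-central1"),
			NodeCount: pulumi.Int(1),
			NodeConfig: &container.NodePoolNodeConfigArgs{
				KubeletConfig: &container.NodePoolNodeConfigKubeletConfigArgs{
					CpuManagerPolicy:                   pulumi.String("static"),
					InsecureKubeletReadonlyPortEnabled: pulumi.String("FALSE"),
				},
			},
		})
		return err
	})
}
```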
func (o GetClusterNodePoolNodeConfigKubeletConfigOutput) PodPidsLimit() pulumi.IntOutput { return o.ApplyT(func(v GetClusterNodePoolNodeConfigKubeletConfig) int { return v.PodPidsLimit }).(pulumi.IntOutput) @@ -60234,6 +60611,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*ClusterNodePoolAutoConfigPtrInput)(nil)).Elem(), ClusterNodePoolAutoConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterNodePoolAutoConfigNetworkTagsInput)(nil)).Elem(), ClusterNodePoolAutoConfigNetworkTagsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterNodePoolAutoConfigNetworkTagsPtrInput)(nil)).Elem(), ClusterNodePoolAutoConfigNetworkTagsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterNodePoolAutoConfigNodeKubeletConfigInput)(nil)).Elem(), ClusterNodePoolAutoConfigNodeKubeletConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterNodePoolAutoConfigNodeKubeletConfigPtrInput)(nil)).Elem(), ClusterNodePoolAutoConfigNodeKubeletConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterNodePoolAutoscalingInput)(nil)).Elem(), ClusterNodePoolAutoscalingArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterNodePoolAutoscalingPtrInput)(nil)).Elem(), ClusterNodePoolAutoscalingArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterNodePoolDefaultsInput)(nil)).Elem(), ClusterNodePoolDefaultsArgs{}) @@ -60615,6 +60994,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetClusterNodePoolAutoConfigArrayInput)(nil)).Elem(), GetClusterNodePoolAutoConfigArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetClusterNodePoolAutoConfigNetworkTagInput)(nil)).Elem(), GetClusterNodePoolAutoConfigNetworkTagArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetClusterNodePoolAutoConfigNetworkTagArrayInput)(nil)).Elem(), GetClusterNodePoolAutoConfigNetworkTagArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetClusterNodePoolAutoConfigNodeKubeletConfigInput)(nil)).Elem(), GetClusterNodePoolAutoConfigNodeKubeletConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetClusterNodePoolAutoConfigNodeKubeletConfigArrayInput)(nil)).Elem(), GetClusterNodePoolAutoConfigNodeKubeletConfigArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetClusterNodePoolAutoscalingInput)(nil)).Elem(), GetClusterNodePoolAutoscalingArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetClusterNodePoolAutoscalingArrayInput)(nil)).Elem(), GetClusterNodePoolAutoscalingArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetClusterNodePoolDefaultInput)(nil)).Elem(), GetClusterNodePoolDefaultArgs{}) @@ -61052,6 +61433,8 @@ func init() { pulumi.RegisterOutputType(ClusterNodePoolAutoConfigPtrOutput{}) pulumi.RegisterOutputType(ClusterNodePoolAutoConfigNetworkTagsOutput{}) pulumi.RegisterOutputType(ClusterNodePoolAutoConfigNetworkTagsPtrOutput{}) + pulumi.RegisterOutputType(ClusterNodePoolAutoConfigNodeKubeletConfigOutput{}) + pulumi.RegisterOutputType(ClusterNodePoolAutoConfigNodeKubeletConfigPtrOutput{}) pulumi.RegisterOutputType(ClusterNodePoolAutoscalingOutput{}) pulumi.RegisterOutputType(ClusterNodePoolAutoscalingPtrOutput{}) pulumi.RegisterOutputType(ClusterNodePoolDefaultsOutput{}) @@ -61433,6 +61816,8 @@ func init() { pulumi.RegisterOutputType(GetClusterNodePoolAutoConfigArrayOutput{}) pulumi.RegisterOutputType(GetClusterNodePoolAutoConfigNetworkTagOutput{}) pulumi.RegisterOutputType(GetClusterNodePoolAutoConfigNetworkTagArrayOutput{}) + pulumi.RegisterOutputType(GetClusterNodePoolAutoConfigNodeKubeletConfigOutput{}) + 
pulumi.RegisterOutputType(GetClusterNodePoolAutoConfigNodeKubeletConfigArrayOutput{}) pulumi.RegisterOutputType(GetClusterNodePoolAutoscalingOutput{}) pulumi.RegisterOutputType(GetClusterNodePoolAutoscalingArrayOutput{}) pulumi.RegisterOutputType(GetClusterNodePoolDefaultOutput{}) diff --git a/sdk/go/gcp/databasemigrationservice/connectionProfile.go b/sdk/go/gcp/databasemigrationservice/connectionProfile.go index e5f9b869b9..91bb1817fd 100644 --- a/sdk/go/gcp/databasemigrationservice/connectionProfile.go +++ b/sdk/go/gcp/databasemigrationservice/connectionProfile.go @@ -345,6 +345,207 @@ import ( // } // // ``` +// ### Database Migration Service Connection Profile Existing Mysql +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := organizations.LookupProject(ctx, nil, nil) +// if err != nil { +// return err +// } +// destinationCsql, err := sql.NewDatabaseInstance(ctx, "destination_csql", &sql.DatabaseInstanceArgs{ +// Name: pulumi.String("destination-csql"), +// DatabaseVersion: pulumi.String("MYSQL_5_7"), +// Settings: &sql.DatabaseInstanceSettingsArgs{ +// Tier: pulumi.String("db-n1-standard-1"), +// DeletionProtectionEnabled: pulumi.Bool(false), +// }, +// DeletionProtection: pulumi.Bool(false), +// }) +// if err != nil { +// return err +// } +// _, err = databasemigrationservice.NewConnectionProfile(ctx, "existing-mysql", &databasemigrationservice.ConnectionProfileArgs{ +// Location: pulumi.String("us-central1"), +// ConnectionProfileId: pulumi.String("destination-cp"), +// DisplayName: pulumi.String("destination-cp_display"), +// Labels: pulumi.StringMap{ +// "foo": pulumi.String("bar"), +// }, +// Mysql: &databasemigrationservice.ConnectionProfileMysqlArgs{ +// CloudSqlId: pulumi.String("destination-csql"), +// }, +// }, pulumi.DependsOn([]pulumi.Resource{ +// destinationCsql, +// })) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// ### Database Migration Service Connection Profile Existing Postgres +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := organizations.LookupProject(ctx, nil, nil) +// if err != nil { +// return err +// } +// destinationCsql, err := sql.NewDatabaseInstance(ctx, "destination_csql", &sql.DatabaseInstanceArgs{ +// Name: pulumi.String("destination-csql"), +// DatabaseVersion: pulumi.String("POSTGRES_15"), +// Settings: &sql.DatabaseInstanceSettingsArgs{ +// Tier: pulumi.String("db-custom-2-13312"), +// DeletionProtectionEnabled: pulumi.Bool(false), +// }, +// DeletionProtection: pulumi.Bool(false), +// }) +// if err != nil { +// return err +// } +// _, err = databasemigrationservice.NewConnectionProfile(ctx, "existing-psql", &databasemigrationservice.ConnectionProfileArgs{ +// Location: pulumi.String("us-central1"), +// ConnectionProfileId: pulumi.String("destination-cp"), +// DisplayName: pulumi.String("destination-cp_display"), +// Labels: pulumi.StringMap{ +// "foo": 
pulumi.String("bar"), +// }, +// Postgresql: &databasemigrationservice.ConnectionProfilePostgresqlArgs{ +// CloudSqlId: pulumi.String("destination-csql"), +// }, +// }, pulumi.DependsOn([]pulumi.Resource{ +// destinationCsql, +// })) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// ### Database Migration Service Connection Profile Existing Alloydb +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/alloydb" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/compute" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/servicenetworking" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := organizations.LookupProject(ctx, nil, nil) +// if err != nil { +// return err +// } +// _, err = compute.NewNetwork(ctx, "default", &compute.NetworkArgs{ +// Name: pulumi.String("destination-alloydb"), +// }) +// if err != nil { +// return err +// } +// destinationAlloydb, err := alloydb.NewCluster(ctx, "destination_alloydb", &alloydb.ClusterArgs{ +// ClusterId: pulumi.String("destination-alloydb"), +// Location: pulumi.String("us-central1"), +// NetworkConfig: &alloydb.ClusterNetworkConfigArgs{ +// Network: _default.ID(), +// }, +// DatabaseVersion: pulumi.String("POSTGRES_15"), +// InitialUser: &alloydb.ClusterInitialUserArgs{ +// User: pulumi.String("destination-alloydb"), +// Password: pulumi.String("destination-alloydb"), +// }, +// }) +// if err != nil { +// return err +// } +// privateIpAlloc, err := compute.NewGlobalAddress(ctx, "private_ip_alloc", &compute.GlobalAddressArgs{ +// Name: pulumi.String("destination-alloydb"), +// AddressType: pulumi.String("INTERNAL"), +// Purpose: pulumi.String("VPC_PEERING"), +// PrefixLength: pulumi.Int(16), +// Network: _default.ID(), +// }) +// if err != nil { +// return err +// } +// vpcConnection, err := servicenetworking.NewConnection(ctx, "vpc_connection", &servicenetworking.ConnectionArgs{ +// Network: _default.ID(), +// Service: pulumi.String("servicenetworking.googleapis.com"), +// ReservedPeeringRanges: pulumi.StringArray{ +// privateIpAlloc.Name, +// }, +// }) +// if err != nil { +// return err +// } +// destinationAlloydbPrimary, err := alloydb.NewInstance(ctx, "destination_alloydb_primary", &alloydb.InstanceArgs{ +// Cluster: destinationAlloydb.Name, +// InstanceId: pulumi.String("destination-alloydb-primary"), +// InstanceType: pulumi.String("PRIMARY"), +// }, pulumi.DependsOn([]pulumi.Resource{ +// vpcConnection, +// })) +// if err != nil { +// return err +// } +// _, err = databasemigrationservice.NewConnectionProfile(ctx, "existing-alloydb", &databasemigrationservice.ConnectionProfileArgs{ +// Location: pulumi.String("us-central1"), +// ConnectionProfileId: pulumi.String("destination-cp"), +// DisplayName: pulumi.String("destination-cp_display"), +// Labels: pulumi.StringMap{ +// "foo": pulumi.String("bar"), +// }, +// Postgresql: &databasemigrationservice.ConnectionProfilePostgresqlArgs{ +// AlloydbClusterId: pulumi.String("destination-alloydb"), +// }, +// }, pulumi.DependsOn([]pulumi.Resource{ +// destinationAlloydb, +// destinationAlloydbPrimary, +// })) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // // ## Import // diff --git a/sdk/go/gcp/databasemigrationservice/pulumiTypes.go 
b/sdk/go/gcp/databasemigrationservice/pulumiTypes.go index ae885a7fea..d8f721d8b6 100644 --- a/sdk/go/gcp/databasemigrationservice/pulumiTypes.go +++ b/sdk/go/gcp/databasemigrationservice/pulumiTypes.go @@ -2072,22 +2072,22 @@ func (o ConnectionProfileErrorArrayOutput) Index(i pulumi.IntInput) ConnectionPr type ConnectionProfileMysql struct { // If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. CloudSqlId *string `pulumi:"cloudSqlId"` - // Required. The IP or hostname of the source MySQL database. - Host string `pulumi:"host"` - // Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + // The IP or hostname of the source MySQL database. + Host *string `pulumi:"host"` + // Input only. The password for the user that Database Migration Service will be using to connect to the database. // This field is not returned on request, and the value is encrypted when stored in Database Migration Service. // **Note**: This property is sensitive and will not be displayed in the plan. - Password string `pulumi:"password"` + Password *string `pulumi:"password"` // (Output) // Output only. Indicates If this connection profile password is stored. PasswordSet *bool `pulumi:"passwordSet"` - // Required. The network port of the source MySQL database. - Port int `pulumi:"port"` + // The network port of the source MySQL database. + Port *int `pulumi:"port"` // SSL configuration for the destination to connect to the source database. // Structure is documented below. Ssl *ConnectionProfileMysqlSsl `pulumi:"ssl"` - // Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - Username string `pulumi:"username"` + // The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + Username *string `pulumi:"username"` } // ConnectionProfileMysqlInput is an input type that accepts ConnectionProfileMysqlArgs and ConnectionProfileMysqlOutput values. @@ -2104,22 +2104,22 @@ type ConnectionProfileMysqlInput interface { type ConnectionProfileMysqlArgs struct { // If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. CloudSqlId pulumi.StringPtrInput `pulumi:"cloudSqlId"` - // Required. The IP or hostname of the source MySQL database. - Host pulumi.StringInput `pulumi:"host"` - // Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + // The IP or hostname of the source MySQL database. + Host pulumi.StringPtrInput `pulumi:"host"` + // Input only. The password for the user that Database Migration Service will be using to connect to the database. // This field is not returned on request, and the value is encrypted when stored in Database Migration Service. // **Note**: This property is sensitive and will not be displayed in the plan. - Password pulumi.StringInput `pulumi:"password"` + Password pulumi.StringPtrInput `pulumi:"password"` // (Output) // Output only. Indicates If this connection profile password is stored. PasswordSet pulumi.BoolPtrInput `pulumi:"passwordSet"` - // Required. The network port of the source MySQL database. - Port pulumi.IntInput `pulumi:"port"` + // The network port of the source MySQL database. 
+ Port pulumi.IntPtrInput `pulumi:"port"` // SSL configuration for the destination to connect to the source database. // Structure is documented below. Ssl ConnectionProfileMysqlSslPtrInput `pulumi:"ssl"` - // Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - Username pulumi.StringInput `pulumi:"username"` + // The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + Username pulumi.StringPtrInput `pulumi:"username"` } func (ConnectionProfileMysqlArgs) ElementType() reflect.Type { @@ -2204,16 +2204,16 @@ func (o ConnectionProfileMysqlOutput) CloudSqlId() pulumi.StringPtrOutput { return o.ApplyT(func(v ConnectionProfileMysql) *string { return v.CloudSqlId }).(pulumi.StringPtrOutput) } -// Required. The IP or hostname of the source MySQL database. -func (o ConnectionProfileMysqlOutput) Host() pulumi.StringOutput { - return o.ApplyT(func(v ConnectionProfileMysql) string { return v.Host }).(pulumi.StringOutput) +// The IP or hostname of the source MySQL database. +func (o ConnectionProfileMysqlOutput) Host() pulumi.StringPtrOutput { + return o.ApplyT(func(v ConnectionProfileMysql) *string { return v.Host }).(pulumi.StringPtrOutput) } -// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. +// Input only. The password for the user that Database Migration Service will be using to connect to the database. // This field is not returned on request, and the value is encrypted when stored in Database Migration Service. // **Note**: This property is sensitive and will not be displayed in the plan. -func (o ConnectionProfileMysqlOutput) Password() pulumi.StringOutput { - return o.ApplyT(func(v ConnectionProfileMysql) string { return v.Password }).(pulumi.StringOutput) +func (o ConnectionProfileMysqlOutput) Password() pulumi.StringPtrOutput { + return o.ApplyT(func(v ConnectionProfileMysql) *string { return v.Password }).(pulumi.StringPtrOutput) } // (Output) @@ -2222,9 +2222,9 @@ func (o ConnectionProfileMysqlOutput) PasswordSet() pulumi.BoolPtrOutput { return o.ApplyT(func(v ConnectionProfileMysql) *bool { return v.PasswordSet }).(pulumi.BoolPtrOutput) } -// Required. The network port of the source MySQL database. -func (o ConnectionProfileMysqlOutput) Port() pulumi.IntOutput { - return o.ApplyT(func(v ConnectionProfileMysql) int { return v.Port }).(pulumi.IntOutput) +// The network port of the source MySQL database. +func (o ConnectionProfileMysqlOutput) Port() pulumi.IntPtrOutput { + return o.ApplyT(func(v ConnectionProfileMysql) *int { return v.Port }).(pulumi.IntPtrOutput) } // SSL configuration for the destination to connect to the source database. @@ -2233,9 +2233,9 @@ func (o ConnectionProfileMysqlOutput) Ssl() ConnectionProfileMysqlSslPtrOutput { return o.ApplyT(func(v ConnectionProfileMysql) *ConnectionProfileMysqlSsl { return v.Ssl }).(ConnectionProfileMysqlSslPtrOutput) } -// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. -func (o ConnectionProfileMysqlOutput) Username() pulumi.StringOutput { - return o.ApplyT(func(v ConnectionProfileMysql) string { return v.Username }).(pulumi.StringOutput) +// The username that Database Migration Service will use to connect to the database. 
The value is encrypted when stored in Database Migration Service. +func (o ConnectionProfileMysqlOutput) Username() pulumi.StringPtrOutput { + return o.ApplyT(func(v ConnectionProfileMysql) *string { return v.Username }).(pulumi.StringPtrOutput) } type ConnectionProfileMysqlPtrOutput struct{ *pulumi.OutputState } @@ -2272,17 +2272,17 @@ func (o ConnectionProfileMysqlPtrOutput) CloudSqlId() pulumi.StringPtrOutput { }).(pulumi.StringPtrOutput) } -// Required. The IP or hostname of the source MySQL database. +// The IP or hostname of the source MySQL database. func (o ConnectionProfileMysqlPtrOutput) Host() pulumi.StringPtrOutput { return o.ApplyT(func(v *ConnectionProfileMysql) *string { if v == nil { return nil } - return &v.Host + return v.Host }).(pulumi.StringPtrOutput) } -// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. +// Input only. The password for the user that Database Migration Service will be using to connect to the database. // This field is not returned on request, and the value is encrypted when stored in Database Migration Service. // **Note**: This property is sensitive and will not be displayed in the plan. func (o ConnectionProfileMysqlPtrOutput) Password() pulumi.StringPtrOutput { @@ -2290,7 +2290,7 @@ func (o ConnectionProfileMysqlPtrOutput) Password() pulumi.StringPtrOutput { if v == nil { return nil } - return &v.Password + return v.Password }).(pulumi.StringPtrOutput) } @@ -2305,13 +2305,13 @@ func (o ConnectionProfileMysqlPtrOutput) PasswordSet() pulumi.BoolPtrOutput { }).(pulumi.BoolPtrOutput) } -// Required. The network port of the source MySQL database. +// The network port of the source MySQL database. func (o ConnectionProfileMysqlPtrOutput) Port() pulumi.IntPtrOutput { return o.ApplyT(func(v *ConnectionProfileMysql) *int { if v == nil { return nil } - return &v.Port + return v.Port }).(pulumi.IntPtrOutput) } @@ -2326,13 +2326,13 @@ func (o ConnectionProfileMysqlPtrOutput) Ssl() ConnectionProfileMysqlSslPtrOutpu }).(ConnectionProfileMysqlSslPtrOutput) } -// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. +// The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. func (o ConnectionProfileMysqlPtrOutput) Username() pulumi.StringPtrOutput { return o.ApplyT(func(v *ConnectionProfileMysql) *string { if v == nil { return nil } - return &v.Username + return v.Username }).(pulumi.StringPtrOutput) } @@ -3599,27 +3599,29 @@ func (o ConnectionProfileOracleStaticServiceIpConnectivityPtrOutput) Elem() Conn } type ConnectionProfilePostgresql struct { + // If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + AlloydbClusterId *string `pulumi:"alloydbClusterId"` // If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. CloudSqlId *string `pulumi:"cloudSqlId"` - // Required. The IP or hostname of the source MySQL database. - Host string `pulumi:"host"` + // The IP or hostname of the source MySQL database. + Host *string `pulumi:"host"` // (Output) // Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. NetworkArchitecture *string `pulumi:"networkArchitecture"` - // Required. Input only. 
The password for the user that Database Migration Service will be using to connect to the database. + // Input only. The password for the user that Database Migration Service will be using to connect to the database. // This field is not returned on request, and the value is encrypted when stored in Database Migration Service. // **Note**: This property is sensitive and will not be displayed in the plan. - Password string `pulumi:"password"` + Password *string `pulumi:"password"` // (Output) // Output only. Indicates If this connection profile password is stored. PasswordSet *bool `pulumi:"passwordSet"` - // Required. The network port of the source MySQL database. - Port int `pulumi:"port"` + // The network port of the source MySQL database. + Port *int `pulumi:"port"` // SSL configuration for the destination to connect to the source database. // Structure is documented below. Ssl *ConnectionProfilePostgresqlSsl `pulumi:"ssl"` - // Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - Username string `pulumi:"username"` + // The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + Username *string `pulumi:"username"` } // ConnectionProfilePostgresqlInput is an input type that accepts ConnectionProfilePostgresqlArgs and ConnectionProfilePostgresqlOutput values. @@ -3634,27 +3636,29 @@ type ConnectionProfilePostgresqlInput interface { } type ConnectionProfilePostgresqlArgs struct { + // If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + AlloydbClusterId pulumi.StringPtrInput `pulumi:"alloydbClusterId"` // If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. CloudSqlId pulumi.StringPtrInput `pulumi:"cloudSqlId"` - // Required. The IP or hostname of the source MySQL database. - Host pulumi.StringInput `pulumi:"host"` + // The IP or hostname of the source MySQL database. + Host pulumi.StringPtrInput `pulumi:"host"` // (Output) // Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. NetworkArchitecture pulumi.StringPtrInput `pulumi:"networkArchitecture"` - // Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + // Input only. The password for the user that Database Migration Service will be using to connect to the database. // This field is not returned on request, and the value is encrypted when stored in Database Migration Service. // **Note**: This property is sensitive and will not be displayed in the plan. - Password pulumi.StringInput `pulumi:"password"` + Password pulumi.StringPtrInput `pulumi:"password"` // (Output) // Output only. Indicates If this connection profile password is stored. PasswordSet pulumi.BoolPtrInput `pulumi:"passwordSet"` - // Required. The network port of the source MySQL database. - Port pulumi.IntInput `pulumi:"port"` + // The network port of the source MySQL database. + Port pulumi.IntPtrInput `pulumi:"port"` // SSL configuration for the destination to connect to the source database. // Structure is documented below. Ssl ConnectionProfilePostgresqlSslPtrInput `pulumi:"ssl"` - // Required. The username that Database Migration Service will use to connect to the database. 
The value is encrypted when stored in Database Migration Service. - Username pulumi.StringInput `pulumi:"username"` + // The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + Username pulumi.StringPtrInput `pulumi:"username"` } func (ConnectionProfilePostgresqlArgs) ElementType() reflect.Type { @@ -3734,14 +3738,19 @@ func (o ConnectionProfilePostgresqlOutput) ToConnectionProfilePostgresqlPtrOutpu }).(ConnectionProfilePostgresqlPtrOutput) } +// If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. +func (o ConnectionProfilePostgresqlOutput) AlloydbClusterId() pulumi.StringPtrOutput { + return o.ApplyT(func(v ConnectionProfilePostgresql) *string { return v.AlloydbClusterId }).(pulumi.StringPtrOutput) +} + // If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. func (o ConnectionProfilePostgresqlOutput) CloudSqlId() pulumi.StringPtrOutput { return o.ApplyT(func(v ConnectionProfilePostgresql) *string { return v.CloudSqlId }).(pulumi.StringPtrOutput) } -// Required. The IP or hostname of the source MySQL database. -func (o ConnectionProfilePostgresqlOutput) Host() pulumi.StringOutput { - return o.ApplyT(func(v ConnectionProfilePostgresql) string { return v.Host }).(pulumi.StringOutput) +// The IP or hostname of the source MySQL database. +func (o ConnectionProfilePostgresqlOutput) Host() pulumi.StringPtrOutput { + return o.ApplyT(func(v ConnectionProfilePostgresql) *string { return v.Host }).(pulumi.StringPtrOutput) } // (Output) @@ -3750,11 +3759,11 @@ func (o ConnectionProfilePostgresqlOutput) NetworkArchitecture() pulumi.StringPt return o.ApplyT(func(v ConnectionProfilePostgresql) *string { return v.NetworkArchitecture }).(pulumi.StringPtrOutput) } -// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. +// Input only. The password for the user that Database Migration Service will be using to connect to the database. // This field is not returned on request, and the value is encrypted when stored in Database Migration Service. // **Note**: This property is sensitive and will not be displayed in the plan. -func (o ConnectionProfilePostgresqlOutput) Password() pulumi.StringOutput { - return o.ApplyT(func(v ConnectionProfilePostgresql) string { return v.Password }).(pulumi.StringOutput) +func (o ConnectionProfilePostgresqlOutput) Password() pulumi.StringPtrOutput { + return o.ApplyT(func(v ConnectionProfilePostgresql) *string { return v.Password }).(pulumi.StringPtrOutput) } // (Output) @@ -3763,9 +3772,9 @@ func (o ConnectionProfilePostgresqlOutput) PasswordSet() pulumi.BoolPtrOutput { return o.ApplyT(func(v ConnectionProfilePostgresql) *bool { return v.PasswordSet }).(pulumi.BoolPtrOutput) } -// Required. The network port of the source MySQL database. -func (o ConnectionProfilePostgresqlOutput) Port() pulumi.IntOutput { - return o.ApplyT(func(v ConnectionProfilePostgresql) int { return v.Port }).(pulumi.IntOutput) +// The network port of the source MySQL database. +func (o ConnectionProfilePostgresqlOutput) Port() pulumi.IntPtrOutput { + return o.ApplyT(func(v ConnectionProfilePostgresql) *int { return v.Port }).(pulumi.IntPtrOutput) } // SSL configuration for the destination to connect to the source database. 
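With `Host`, `Port`, `Username`, and `Password` on `ConnectionProfilePostgresql` now pointer-typed (optional), and with the new `AlloydbClusterId` field, a destination profile can reference an existing AlloyDB cluster without any per-host connection settings. A minimal sketch of that usage, mirroring the `existing-alloydb` example documented earlier in this patch (the package name, helper function, and resource names are illustrative only, not part of the generated SDK):

```go
package examples

import (
	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/databasemigrationservice"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// newExistingAlloydbProfile is an illustrative helper: because the per-host
// fields are optional, only AlloydbClusterId needs to be set when the
// destination is an existing AlloyDB cluster.
func newExistingAlloydbProfile(ctx *pulumi.Context) (*databasemigrationservice.ConnectionProfile, error) {
	return databasemigrationservice.NewConnectionProfile(ctx, "existing-alloydb", &databasemigrationservice.ConnectionProfileArgs{
		Location:            pulumi.String("us-central1"),
		ConnectionProfileId: pulumi.String("destination-cp"),
		Postgresql: &databasemigrationservice.ConnectionProfilePostgresqlArgs{
			// Host, Port, Username, and Password are omitted here on purpose.
			AlloydbClusterId: pulumi.String("destination-alloydb"),
		},
	})
}
```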
@@ -3774,9 +3783,9 @@ func (o ConnectionProfilePostgresqlOutput) Ssl() ConnectionProfilePostgresqlSslP return o.ApplyT(func(v ConnectionProfilePostgresql) *ConnectionProfilePostgresqlSsl { return v.Ssl }).(ConnectionProfilePostgresqlSslPtrOutput) } -// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. -func (o ConnectionProfilePostgresqlOutput) Username() pulumi.StringOutput { - return o.ApplyT(func(v ConnectionProfilePostgresql) string { return v.Username }).(pulumi.StringOutput) +// The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. +func (o ConnectionProfilePostgresqlOutput) Username() pulumi.StringPtrOutput { + return o.ApplyT(func(v ConnectionProfilePostgresql) *string { return v.Username }).(pulumi.StringPtrOutput) } type ConnectionProfilePostgresqlPtrOutput struct{ *pulumi.OutputState } @@ -3803,6 +3812,16 @@ func (o ConnectionProfilePostgresqlPtrOutput) Elem() ConnectionProfilePostgresql }).(ConnectionProfilePostgresqlOutput) } +// If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. +func (o ConnectionProfilePostgresqlPtrOutput) AlloydbClusterId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ConnectionProfilePostgresql) *string { + if v == nil { + return nil + } + return v.AlloydbClusterId + }).(pulumi.StringPtrOutput) +} + // If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. func (o ConnectionProfilePostgresqlPtrOutput) CloudSqlId() pulumi.StringPtrOutput { return o.ApplyT(func(v *ConnectionProfilePostgresql) *string { @@ -3813,13 +3832,13 @@ func (o ConnectionProfilePostgresqlPtrOutput) CloudSqlId() pulumi.StringPtrOutpu }).(pulumi.StringPtrOutput) } -// Required. The IP or hostname of the source MySQL database. +// The IP or hostname of the source MySQL database. func (o ConnectionProfilePostgresqlPtrOutput) Host() pulumi.StringPtrOutput { return o.ApplyT(func(v *ConnectionProfilePostgresql) *string { if v == nil { return nil } - return &v.Host + return v.Host }).(pulumi.StringPtrOutput) } @@ -3834,7 +3853,7 @@ func (o ConnectionProfilePostgresqlPtrOutput) NetworkArchitecture() pulumi.Strin }).(pulumi.StringPtrOutput) } -// Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. +// Input only. The password for the user that Database Migration Service will be using to connect to the database. // This field is not returned on request, and the value is encrypted when stored in Database Migration Service. // **Note**: This property is sensitive and will not be displayed in the plan. func (o ConnectionProfilePostgresqlPtrOutput) Password() pulumi.StringPtrOutput { @@ -3842,7 +3861,7 @@ func (o ConnectionProfilePostgresqlPtrOutput) Password() pulumi.StringPtrOutput if v == nil { return nil } - return &v.Password + return v.Password }).(pulumi.StringPtrOutput) } @@ -3857,13 +3876,13 @@ func (o ConnectionProfilePostgresqlPtrOutput) PasswordSet() pulumi.BoolPtrOutput }).(pulumi.BoolPtrOutput) } -// Required. The network port of the source MySQL database. +// The network port of the source MySQL database. 
func (o ConnectionProfilePostgresqlPtrOutput) Port() pulumi.IntPtrOutput {
	return o.ApplyT(func(v *ConnectionProfilePostgresql) *int {
		if v == nil {
			return nil
		}
-		return &v.Port
+		return v.Port
	}).(pulumi.IntPtrOutput)
}

@@ -3878,13 +3897,13 @@ func (o ConnectionProfilePostgresqlPtrOutput) Ssl() ConnectionProfilePostgresqlS
	}).(ConnectionProfilePostgresqlSslPtrOutput)
}

-// Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.
+// The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service.
func (o ConnectionProfilePostgresqlPtrOutput) Username() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *ConnectionProfilePostgresql) *string {
		if v == nil {
			return nil
		}
-		return &v.Username
+		return v.Username
	}).(pulumi.StringPtrOutput)
}

diff --git a/sdk/go/gcp/dataloss/pulumiTypes.go b/sdk/go/gcp/dataloss/pulumiTypes.go
index 9d2049ed04..00d4a859c8 100644
--- a/sdk/go/gcp/dataloss/pulumiTypes.go
+++ b/sdk/go/gcp/dataloss/pulumiTypes.go
@@ -34711,6 +34711,9 @@ type PreventionDiscoveryConfigAction struct {
	// Publish a message into the Pub/Sub topic.
	// Structure is documented below.
	PubSubNotification *PreventionDiscoveryConfigActionPubSubNotification `pulumi:"pubSubNotification"`
+	// Tag the profiled resources with the specified tag values, based on sensitivity conditions.
+	// Structure is documented below.
+	TagResources *PreventionDiscoveryConfigActionTagResources `pulumi:"tagResources"`
}

// PreventionDiscoveryConfigActionInput is an input type that accepts PreventionDiscoveryConfigActionArgs and PreventionDiscoveryConfigActionOutput values.
@@ -34731,6 +34734,9 @@ type PreventionDiscoveryConfigActionArgs struct {
	// Publish a message into the Pub/Sub topic.
	// Structure is documented below.
	PubSubNotification PreventionDiscoveryConfigActionPubSubNotificationPtrInput `pulumi:"pubSubNotification"`
+	// Tag the profiled resources with the specified tag values, based on sensitivity conditions.
+	// Structure is documented below.
+	TagResources PreventionDiscoveryConfigActionTagResourcesPtrInput `pulumi:"tagResources"`
}

func (PreventionDiscoveryConfigActionArgs) ElementType() reflect.Type {
@@ -34800,6 +34806,14 @@ func (o PreventionDiscoveryConfigActionOutput) PubSubNotification() PreventionDi
	}).(PreventionDiscoveryConfigActionPubSubNotificationPtrOutput)
}

+// Tag the profiled resources with the specified tag values, based on sensitivity conditions.
+// Structure is documented below.
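+// As an illustrative sketch only (not generated code; it assumes the usual
+// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataloss" and pulumi imports),
+// a discovery-config action wired to the new tagResources types added in
+// this file might look roughly like:
+//
+//	dataloss.PreventionDiscoveryConfigActionArgs{
+//		TagResources: dataloss.PreventionDiscoveryConfigActionTagResourcesArgs{
+//			// Tag both newly created and updated profiles.
+//			ProfileGenerationsToTags: pulumi.StringArray{
+//				pulumi.String("PROFILE_GENERATION_NEW"),
+//				pulumi.String("PROFILE_GENERATION_UPDATE"),
+//			},
+//			TagConditions: dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionArray{
+//				dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionArgs{
+//					// Apply the tag to resources whose profile scores SENSITIVITY_HIGH.
+//					SensitivityScore: dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs{
+//						Score: pulumi.String("SENSITIVITY_HIGH"),
+//					},
+//				},
+//			},
+//		},
+//	}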
+func (o PreventionDiscoveryConfigActionOutput) TagResources() PreventionDiscoveryConfigActionTagResourcesPtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigAction) *PreventionDiscoveryConfigActionTagResources { + return v.TagResources + }).(PreventionDiscoveryConfigActionTagResourcesPtrOutput) +} + type PreventionDiscoveryConfigActionArrayOutput struct{ *pulumi.OutputState } func (PreventionDiscoveryConfigActionArrayOutput) ElementType() reflect.Type { @@ -35705,72 +35719,651 @@ func (i PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressi type PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayInput interface { pulumi.Input - ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput() PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput - ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutputWithContext(context.Context) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput + ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput() PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput + ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutputWithContext(context.Context) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput +} + +type PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArray []PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionInput + +func (PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition)(nil)).Elem() +} + +func (i PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArray) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput() PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput { + return i.ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArray) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) +} + +type PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput() 
PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput { + return o +} + +func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput { + return o +} + +// The minimum data risk score that triggers the condition. +// Possible values are: `HIGH`, `MEDIUM_OR_HIGH`. +func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) MinimumRiskScore() pulumi.StringPtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition) *string { + return v.MinimumRiskScore + }).(pulumi.StringPtrOutput) +} + +// The minimum sensitivity level that triggers the condition. +// Possible values are: `HIGH`, `MEDIUM_OR_HIGH`. +func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) MinimumSensitivityScore() pulumi.StringPtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition) *string { + return v.MinimumSensitivityScore + }).(pulumi.StringPtrOutput) +} + +type PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput() PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput { + return o +} + +func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput { + return o +} + +func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) Index(i pulumi.IntInput) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition { + return vs[0].([]PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition)[vs[1].(int)] + }).(PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) +} + +type PreventionDiscoveryConfigActionTagResources struct { + // Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. 
For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + LowerDataRiskToLow *bool `pulumi:"lowerDataRiskToLow"` + // The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + // Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + ProfileGenerationsToTags []string `pulumi:"profileGenerationsToTags"` + // The tags to associate with different conditions. + // Structure is documented below. + TagConditions []PreventionDiscoveryConfigActionTagResourcesTagCondition `pulumi:"tagConditions"` +} + +// PreventionDiscoveryConfigActionTagResourcesInput is an input type that accepts PreventionDiscoveryConfigActionTagResourcesArgs and PreventionDiscoveryConfigActionTagResourcesOutput values. +// You can construct a concrete instance of `PreventionDiscoveryConfigActionTagResourcesInput` via: +// +// PreventionDiscoveryConfigActionTagResourcesArgs{...} +type PreventionDiscoveryConfigActionTagResourcesInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigActionTagResourcesOutput() PreventionDiscoveryConfigActionTagResourcesOutput + ToPreventionDiscoveryConfigActionTagResourcesOutputWithContext(context.Context) PreventionDiscoveryConfigActionTagResourcesOutput +} + +type PreventionDiscoveryConfigActionTagResourcesArgs struct { + // Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + LowerDataRiskToLow pulumi.BoolPtrInput `pulumi:"lowerDataRiskToLow"` + // The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + // Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + ProfileGenerationsToTags pulumi.StringArrayInput `pulumi:"profileGenerationsToTags"` + // The tags to associate with different conditions. + // Structure is documented below. 
+ TagConditions PreventionDiscoveryConfigActionTagResourcesTagConditionArrayInput `pulumi:"tagConditions"` +} + +func (PreventionDiscoveryConfigActionTagResourcesArgs) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigActionTagResources)(nil)).Elem() +} + +func (i PreventionDiscoveryConfigActionTagResourcesArgs) ToPreventionDiscoveryConfigActionTagResourcesOutput() PreventionDiscoveryConfigActionTagResourcesOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigActionTagResourcesArgs) ToPreventionDiscoveryConfigActionTagResourcesOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesOutput) +} + +func (i PreventionDiscoveryConfigActionTagResourcesArgs) ToPreventionDiscoveryConfigActionTagResourcesPtrOutput() PreventionDiscoveryConfigActionTagResourcesPtrOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesPtrOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigActionTagResourcesArgs) ToPreventionDiscoveryConfigActionTagResourcesPtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesOutput).ToPreventionDiscoveryConfigActionTagResourcesPtrOutputWithContext(ctx) +} + +// PreventionDiscoveryConfigActionTagResourcesPtrInput is an input type that accepts PreventionDiscoveryConfigActionTagResourcesArgs, PreventionDiscoveryConfigActionTagResourcesPtr and PreventionDiscoveryConfigActionTagResourcesPtrOutput values. +// You can construct a concrete instance of `PreventionDiscoveryConfigActionTagResourcesPtrInput` via: +// +// PreventionDiscoveryConfigActionTagResourcesArgs{...} +// +// or: +// +// nil +type PreventionDiscoveryConfigActionTagResourcesPtrInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigActionTagResourcesPtrOutput() PreventionDiscoveryConfigActionTagResourcesPtrOutput + ToPreventionDiscoveryConfigActionTagResourcesPtrOutputWithContext(context.Context) PreventionDiscoveryConfigActionTagResourcesPtrOutput +} + +type preventionDiscoveryConfigActionTagResourcesPtrType PreventionDiscoveryConfigActionTagResourcesArgs + +func PreventionDiscoveryConfigActionTagResourcesPtr(v *PreventionDiscoveryConfigActionTagResourcesArgs) PreventionDiscoveryConfigActionTagResourcesPtrInput { + return (*preventionDiscoveryConfigActionTagResourcesPtrType)(v) +} + +func (*preventionDiscoveryConfigActionTagResourcesPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigActionTagResources)(nil)).Elem() +} + +func (i *preventionDiscoveryConfigActionTagResourcesPtrType) ToPreventionDiscoveryConfigActionTagResourcesPtrOutput() PreventionDiscoveryConfigActionTagResourcesPtrOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesPtrOutputWithContext(context.Background()) +} + +func (i *preventionDiscoveryConfigActionTagResourcesPtrType) ToPreventionDiscoveryConfigActionTagResourcesPtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesPtrOutput) +} + +type PreventionDiscoveryConfigActionTagResourcesOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionTagResourcesOutput) ElementType() reflect.Type { + 
return reflect.TypeOf((*PreventionDiscoveryConfigActionTagResources)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigActionTagResourcesOutput) ToPreventionDiscoveryConfigActionTagResourcesOutput() PreventionDiscoveryConfigActionTagResourcesOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesOutput) ToPreventionDiscoveryConfigActionTagResourcesOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesOutput) ToPreventionDiscoveryConfigActionTagResourcesPtrOutput() PreventionDiscoveryConfigActionTagResourcesPtrOutput { + return o.ToPreventionDiscoveryConfigActionTagResourcesPtrOutputWithContext(context.Background()) +} + +func (o PreventionDiscoveryConfigActionTagResourcesOutput) ToPreventionDiscoveryConfigActionTagResourcesPtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v PreventionDiscoveryConfigActionTagResources) *PreventionDiscoveryConfigActionTagResources { + return &v + }).(PreventionDiscoveryConfigActionTagResourcesPtrOutput) +} + +// Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. +func (o PreventionDiscoveryConfigActionTagResourcesOutput) LowerDataRiskToLow() pulumi.BoolPtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigActionTagResources) *bool { return v.LowerDataRiskToLow }).(pulumi.BoolPtrOutput) +} + +// The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. +// Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. +func (o PreventionDiscoveryConfigActionTagResourcesOutput) ProfileGenerationsToTags() pulumi.StringArrayOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigActionTagResources) []string { return v.ProfileGenerationsToTags }).(pulumi.StringArrayOutput) +} + +// The tags to associate with different conditions. +// Structure is documented below. 
+func (o PreventionDiscoveryConfigActionTagResourcesOutput) TagConditions() PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigActionTagResources) []PreventionDiscoveryConfigActionTagResourcesTagCondition { + return v.TagConditions + }).(PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput) +} + +type PreventionDiscoveryConfigActionTagResourcesPtrOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionTagResourcesPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigActionTagResources)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigActionTagResourcesPtrOutput) ToPreventionDiscoveryConfigActionTagResourcesPtrOutput() PreventionDiscoveryConfigActionTagResourcesPtrOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesPtrOutput) ToPreventionDiscoveryConfigActionTagResourcesPtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesPtrOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesPtrOutput) Elem() PreventionDiscoveryConfigActionTagResourcesOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigActionTagResources) PreventionDiscoveryConfigActionTagResources { + if v != nil { + return *v + } + var ret PreventionDiscoveryConfigActionTagResources + return ret + }).(PreventionDiscoveryConfigActionTagResourcesOutput) +} + +// Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. +func (o PreventionDiscoveryConfigActionTagResourcesPtrOutput) LowerDataRiskToLow() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigActionTagResources) *bool { + if v == nil { + return nil + } + return v.LowerDataRiskToLow + }).(pulumi.BoolPtrOutput) +} + +// The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. +// Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. +func (o PreventionDiscoveryConfigActionTagResourcesPtrOutput) ProfileGenerationsToTags() pulumi.StringArrayOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigActionTagResources) []string { + if v == nil { + return nil + } + return v.ProfileGenerationsToTags + }).(pulumi.StringArrayOutput) +} + +// The tags to associate with different conditions. +// Structure is documented below. 
+func (o PreventionDiscoveryConfigActionTagResourcesPtrOutput) TagConditions() PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigActionTagResources) []PreventionDiscoveryConfigActionTagResourcesTagCondition { + if v == nil { + return nil + } + return v.TagConditions + }).(PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput) +} + +type PreventionDiscoveryConfigActionTagResourcesTagCondition struct { + // Conditions attaching the tag to a resource on its profile having this sensitivity score. + // Structure is documented below. + SensitivityScore *PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore `pulumi:"sensitivityScore"` + // The tag value to attach to resources. + // Structure is documented below. + Tag *PreventionDiscoveryConfigActionTagResourcesTagConditionTag `pulumi:"tag"` +} + +// PreventionDiscoveryConfigActionTagResourcesTagConditionInput is an input type that accepts PreventionDiscoveryConfigActionTagResourcesTagConditionArgs and PreventionDiscoveryConfigActionTagResourcesTagConditionOutput values. +// You can construct a concrete instance of `PreventionDiscoveryConfigActionTagResourcesTagConditionInput` via: +// +// PreventionDiscoveryConfigActionTagResourcesTagConditionArgs{...} +type PreventionDiscoveryConfigActionTagResourcesTagConditionInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigActionTagResourcesTagConditionOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionOutput + ToPreventionDiscoveryConfigActionTagResourcesTagConditionOutputWithContext(context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionOutput +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionArgs struct { + // Conditions attaching the tag to a resource on its profile having this sensitivity score. + // Structure is documented below. + SensitivityScore PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrInput `pulumi:"sensitivityScore"` + // The tag value to attach to resources. + // Structure is documented below. + Tag PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrInput `pulumi:"tag"` +} + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionArgs) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagCondition)(nil)).Elem() +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesTagConditionOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesTagConditionOutput) +} + +// PreventionDiscoveryConfigActionTagResourcesTagConditionArrayInput is an input type that accepts PreventionDiscoveryConfigActionTagResourcesTagConditionArray and PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput values. 
+// You can construct a concrete instance of `PreventionDiscoveryConfigActionTagResourcesTagConditionArrayInput` via: +// +// PreventionDiscoveryConfigActionTagResourcesTagConditionArray{ PreventionDiscoveryConfigActionTagResourcesTagConditionArgs{...} } +type PreventionDiscoveryConfigActionTagResourcesTagConditionArrayInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput + ToPreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutputWithContext(context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionArray []PreventionDiscoveryConfigActionTagResourcesTagConditionInput + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]PreventionDiscoveryConfigActionTagResourcesTagCondition)(nil)).Elem() +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionArray) ToPreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionArray) ToPreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput) +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionOutput) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagCondition)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionOutput { + return o +} + +// Conditions attaching the tag to a resource on its profile having this sensitivity score. +// Structure is documented below. +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionOutput) SensitivityScore() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigActionTagResourcesTagCondition) *PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore { + return v.SensitivityScore + }).(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput) +} + +// The tag value to attach to resources. +// Structure is documented below. 
+func (o PreventionDiscoveryConfigActionTagResourcesTagConditionOutput) Tag() PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigActionTagResourcesTagCondition) *PreventionDiscoveryConfigActionTagResourcesTagConditionTag { + return v.Tag + }).(PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput) +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]PreventionDiscoveryConfigActionTagResourcesTagCondition)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput) Index(i pulumi.IntInput) PreventionDiscoveryConfigActionTagResourcesTagConditionOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) PreventionDiscoveryConfigActionTagResourcesTagCondition { + return vs[0].([]PreventionDiscoveryConfigActionTagResourcesTagCondition)[vs[1].(int)] + }).(PreventionDiscoveryConfigActionTagResourcesTagConditionOutput) +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore struct { + // The sensitivity score applied to the resource. + // Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + Score string `pulumi:"score"` +} + +// PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreInput is an input type that accepts PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs and PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput values. +// You can construct a concrete instance of `PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreInput` via: +// +// PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs{...} +type PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput + ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutputWithContext(context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs struct { + // The sensitivity score applied to the resource. + // Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. 
+ Score pulumi.StringInput `pulumi:"score"` +} + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore)(nil)).Elem() +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput) +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput).ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutputWithContext(ctx) +} + +// PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrInput is an input type that accepts PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs, PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtr and PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput values. 
+// You can construct a concrete instance of `PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrInput` via: +// +// PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs{...} +// +// or: +// +// nil +type PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput + ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutputWithContext(context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput +} + +type preventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrType PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs + +func PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtr(v *PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrInput { + return (*preventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrType)(v) +} + +func (*preventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrType) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore)(nil)).Elem() +} + +func (i *preventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrType) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutputWithContext(context.Background()) +} + +func (i *preventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrType) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput) +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput { + return 
o.ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutputWithContext(context.Background()) +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore) *PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore { + return &v + }).(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput) +} + +// The sensitivity score applied to the resource. +// Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput) Score() pulumi.StringOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore) string { return v.Score }).(pulumi.StringOutput) +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput { + return o +} + +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput) Elem() PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore) PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore { + if v != nil { + return *v + } + var ret PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore + return ret + }).(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput) +} + +// The sensitivity score applied to the resource. +// Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput) Score() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore) *string { + if v == nil { + return nil + } + return &v.Score + }).(pulumi.StringPtrOutput) +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionTag struct { + // The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". 
+ NamespacedValue *string `pulumi:"namespacedValue"` +} + +// PreventionDiscoveryConfigActionTagResourcesTagConditionTagInput is an input type that accepts PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs and PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput values. +// You can construct a concrete instance of `PreventionDiscoveryConfigActionTagResourcesTagConditionTagInput` via: +// +// PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs{...} +type PreventionDiscoveryConfigActionTagResourcesTagConditionTagInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput + ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagOutputWithContext(context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput +} + +type PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs struct { + // The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + NamespacedValue pulumi.StringPtrInput `pulumi:"namespacedValue"` +} + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionTag)(nil)).Elem() +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput) +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput).ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutputWithContext(ctx) +} + +// PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrInput is an input type that accepts PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs, PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtr and PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput values. 
+// You can construct a concrete instance of `PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrInput` via: +// +// PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs{...} +// +// or: +// +// nil +type PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput + ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutputWithContext(context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput } -type PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArray []PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionInput +type preventionDiscoveryConfigActionTagResourcesTagConditionTagPtrType PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs -func (PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArray) ElementType() reflect.Type { - return reflect.TypeOf((*[]PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition)(nil)).Elem() +func PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtr(v *PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs) PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrInput { + return (*preventionDiscoveryConfigActionTagResourcesTagConditionTagPtrType)(v) } -func (i PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArray) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput() PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput { - return i.ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutputWithContext(context.Background()) +func (*preventionDiscoveryConfigActionTagResourcesTagConditionTagPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigActionTagResourcesTagConditionTag)(nil)).Elem() } -func (i PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArray) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput { - return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) +func (i *preventionDiscoveryConfigActionTagResourcesTagConditionTagPtrType) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput { + return i.ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutputWithContext(context.Background()) } -type PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput struct{ *pulumi.OutputState } +func (i *preventionDiscoveryConfigActionTagResourcesTagConditionTagPtrType) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput) +} -func (PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) ElementType() 
reflect.Type { - return reflect.TypeOf((*PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition)(nil)).Elem() +type PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionTag)(nil)).Elem() } -func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput() PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput { +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput { return o } -func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput { +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput { return o } -// The minimum data risk score that triggers the condition. -// Possible values are: `HIGH`, `MEDIUM_OR_HIGH`. -func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) MinimumRiskScore() pulumi.StringPtrOutput { - return o.ApplyT(func(v PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition) *string { - return v.MinimumRiskScore - }).(pulumi.StringPtrOutput) +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput { + return o.ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutputWithContext(context.Background()) } -// The minimum sensitivity level that triggers the condition. -// Possible values are: `HIGH`, `MEDIUM_OR_HIGH`. -func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) MinimumSensitivityScore() pulumi.StringPtrOutput { - return o.ApplyT(func(v PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition) *string { - return v.MinimumSensitivityScore - }).(pulumi.StringPtrOutput) +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v PreventionDiscoveryConfigActionTagResourcesTagConditionTag) *PreventionDiscoveryConfigActionTagResourcesTagConditionTag { + return &v + }).(PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput) } -type PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput struct{ *pulumi.OutputState } +// The namespaced name for the tag value to attach to resources. 
Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput) NamespacedValue() pulumi.StringPtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigActionTagResourcesTagConditionTag) *string { return v.NamespacedValue }).(pulumi.StringPtrOutput) +} -func (PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) ElementType() reflect.Type { - return reflect.TypeOf((*[]PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition)(nil)).Elem() +type PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigActionTagResourcesTagConditionTag)(nil)).Elem() } -func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput() PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput { +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput() PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput { return o } -func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) ToPreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput { +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput) ToPreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput { return o } -func (o PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput) Index(i pulumi.IntInput) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput { - return pulumi.All(o, i).ApplyT(func(vs []interface{}) PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition { - return vs[0].([]PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition)[vs[1].(int)] - }).(PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput) +func (o PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput) Elem() PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigActionTagResourcesTagConditionTag) PreventionDiscoveryConfigActionTagResourcesTagConditionTag { + if v != nil { + return *v + } + var ret PreventionDiscoveryConfigActionTagResourcesTagConditionTag + return ret + }).(PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput) +} + +// The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". 
+func (o PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput) NamespacedValue() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigActionTagResourcesTagConditionTag) *string { + if v == nil { + return nil + } + return v.NamespacedValue + }).(pulumi.StringPtrOutput) } type PreventionDiscoveryConfigError struct { @@ -36728,6 +37321,9 @@ func (o PreventionDiscoveryConfigTargetBigQueryTargetPtrOutput) Filter() Prevent } type PreventionDiscoveryConfigTargetBigQueryTargetCadence struct { + // Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + // Structure is documented below. + InspectTemplateModifiedCadence *PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence `pulumi:"inspectTemplateModifiedCadence"` // Governs when to update data profiles when a schema is modified // Structure is documented below. SchemaModifiedCadence *PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence `pulumi:"schemaModifiedCadence"` @@ -36748,6 +37344,9 @@ type PreventionDiscoveryConfigTargetBigQueryTargetCadenceInput interface { } type PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs struct { + // Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + // Structure is documented below. + InspectTemplateModifiedCadence PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrInput `pulumi:"inspectTemplateModifiedCadence"` // Governs when to update data profiles when a schema is modified // Structure is documented below. SchemaModifiedCadence PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadencePtrInput `pulumi:"schemaModifiedCadence"` @@ -36833,6 +37432,14 @@ func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceOutput) ToPrevention }).(PreventionDiscoveryConfigTargetBigQueryTargetCadencePtrOutput) } +// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. +// Structure is documented below. +func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceOutput) InspectTemplateModifiedCadence() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigTargetBigQueryTargetCadence) *PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence { + return v.InspectTemplateModifiedCadence + }).(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput) +} + // Governs when to update data profiles when a schema is modified // Structure is documented below. func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceOutput) SchemaModifiedCadence() PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadencePtrOutput { @@ -36873,6 +37480,17 @@ func (o PreventionDiscoveryConfigTargetBigQueryTargetCadencePtrOutput) Elem() Pr }).(PreventionDiscoveryConfigTargetBigQueryTargetCadenceOutput) } +// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. +// Structure is documented below. 
+func (o PreventionDiscoveryConfigTargetBigQueryTargetCadencePtrOutput) InspectTemplateModifiedCadence() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigTargetBigQueryTargetCadence) *PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence { + if v == nil { + return nil + } + return v.InspectTemplateModifiedCadence + }).(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput) +} + // Governs when to update data profiles when a schema is modified // Structure is documented below. func (o PreventionDiscoveryConfigTargetBigQueryTargetCadencePtrOutput) SchemaModifiedCadence() PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadencePtrOutput { @@ -36895,6 +37513,149 @@ func (o PreventionDiscoveryConfigTargetBigQueryTargetCadencePtrOutput) TableModi }).(PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadencePtrOutput) } +type PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence struct { + // How frequently data profiles can be updated when the template is modified. Defaults to never. + // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + Frequency *string `pulumi:"frequency"` +} + +// PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceInput is an input type that accepts PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs and PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput values. +// You can construct a concrete instance of `PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceInput` via: +// +// PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs{...} +type PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput + ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutputWithContext(context.Context) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput +} + +type PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs struct { + // How frequently data profiles can be updated when the template is modified. Defaults to never. + // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
+ Frequency pulumi.StringPtrInput `pulumi:"frequency"` +} + +func (PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence)(nil)).Elem() +} + +func (i PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput { + return i.ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput) +} + +func (i PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return i.ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput).ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx) +} + +// PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrInput is an input type that accepts PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs, PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtr and PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput values. 
+// You can construct a concrete instance of `PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrInput` via: +// +// PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs{...} +// +// or: +// +// nil +type PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput + ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutputWithContext(context.Context) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput +} + +type preventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrType PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs + +func PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtr(v *PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrInput { + return (*preventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrType)(v) +} + +func (*preventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrType) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence)(nil)).Elem() +} + +func (i *preventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrType) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return i.ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutputWithContext(context.Background()) +} + +func (i *preventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrType) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput) +} + +type PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput { + return o +} + +func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput { + return o +} + +func (o 
PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return o.ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutputWithContext(context.Background()) +} + +func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence) *PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence { + return &v + }).(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput) +} + +// How frequently data profiles can be updated when the template is modified. Defaults to never. +// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. +func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput) Frequency() pulumi.StringPtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence) *string { + return v.Frequency + }).(pulumi.StringPtrOutput) +} + +type PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return o +} + +func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput) ToPreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput { + return o +} + +func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput) Elem() PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence) PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence { + if v != nil { + return *v + } + var ret PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence + return ret + }).(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput) +} + +// How frequently data profiles can be updated when the template is modified. Defaults to never. +// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
+func (o PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput) Frequency() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence) *string { + if v == nil { + return nil + } + return v.Frequency + }).(pulumi.StringPtrOutput) +} + type PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence struct { // Frequency to regenerate data profiles when the schema is modified. Defaults to monthly. // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. @@ -40150,6 +40911,9 @@ func (o PreventionDiscoveryConfigTargetCloudSqlTargetFilterOthersPtrOutput) Elem } type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence struct { + // Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + // Structure is documented below. + InspectTemplateModifiedCadence *PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence `pulumi:"inspectTemplateModifiedCadence"` // Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. RefreshFrequency *string `pulumi:"refreshFrequency"` @@ -40170,6 +40934,9 @@ type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInput interfa } type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs struct { + // Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + // Structure is documented below. + InspectTemplateModifiedCadence PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrInput `pulumi:"inspectTemplateModifiedCadence"` // Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. RefreshFrequency pulumi.StringPtrInput `pulumi:"refreshFrequency"` @@ -40255,6 +41022,14 @@ func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceOutput) To }).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadencePtrOutput) } +// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. +// Structure is documented below. +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceOutput) InspectTemplateModifiedCadence() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence) *PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence { + return v.InspectTemplateModifiedCadence + }).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput) +} + // Data changes in Cloud Storage can't trigger reprofiling. 
If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceOutput) RefreshFrequency() pulumi.StringPtrOutput { @@ -40295,6 +41070,17 @@ func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadencePtrOutput) }).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceOutput) } +// Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. +// Structure is documented below. +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadencePtrOutput) InspectTemplateModifiedCadence() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence) *PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence { + if v == nil { + return nil + } + return v.InspectTemplateModifiedCadence + }).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput) +} + // Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadencePtrOutput) RefreshFrequency() pulumi.StringPtrOutput { @@ -40317,6 +41103,149 @@ func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadencePtrOutput) }).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadencePtrOutput) } +type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence struct { + // How frequently data profiles can be updated when the template is modified. Defaults to never. + // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + Frequency string `pulumi:"frequency"` +} + +// PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceInput is an input type that accepts PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs and PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput values. 
+// You can construct a concrete instance of `PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceInput` via: +// +// PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs{...} +type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput + ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutputWithContext(context.Context) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput +} + +type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs struct { + // How frequently data profiles can be updated when the template is modified. Defaults to never. + // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + Frequency pulumi.StringInput `pulumi:"frequency"` +} + +func (PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs) ElementType() reflect.Type { + return reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence)(nil)).Elem() +} + +func (i PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput { + return i.ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput) +} + +func (i PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return i.ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutputWithContext(context.Background()) +} + +func (i PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput).ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx) +} + +// 
PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrInput is an input type that accepts PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs, PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtr and PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput values. +// You can construct a concrete instance of `PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrInput` via: +// +// PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs{...} +// +// or: +// +// nil +type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrInput interface { + pulumi.Input + + ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput + ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutputWithContext(context.Context) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput +} + +type preventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrType PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs + +func PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtr(v *PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrInput { + return (*preventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrType)(v) +} + +func (*preventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrType) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence)(nil)).Elem() +} + +func (i *preventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrType) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return i.ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutputWithContext(context.Background()) +} + +func (i *preventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrType) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput) +} + +type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput) ElementType() reflect.Type { + return 
reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput { + return o +} + +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput { + return o +} + +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return o.ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutputWithContext(context.Background()) +} + +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence) *PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence { + return &v + }).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput) +} + +// How frequently data profiles can be updated when the template is modified. Defaults to never. +// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
+func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput) Frequency() pulumi.StringOutput { + return o.ApplyT(func(v PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence) string { + return v.Frequency + }).(pulumi.StringOutput) +} + +type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput struct{ *pulumi.OutputState } + +func (PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence)(nil)).Elem() +} + +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return o +} + +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput) ToPreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutputWithContext(ctx context.Context) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput { + return o +} + +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput) Elem() PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence) PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence { + if v != nil { + return *v + } + var ret PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence + return ret + }).(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput) +} + +// How frequently data profiles can be updated when the template is modified. Defaults to never. +// Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. +func (o PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput) Frequency() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence) *string { + if v == nil { + return nil + } + return &v.Frequency + }).(pulumi.StringPtrOutput) +} + type PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence struct { // Frequency to regenerate data profiles when the schema is modified. Defaults to monthly. // Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
@@ -61538,6 +62467,14 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsPtrInput)(nil)).Elem(), PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionInput)(nil)).Elem(), PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayInput)(nil)).Elem(), PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesInput)(nil)).Elem(), PreventionDiscoveryConfigActionTagResourcesArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesPtrInput)(nil)).Elem(), PreventionDiscoveryConfigActionTagResourcesArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionInput)(nil)).Elem(), PreventionDiscoveryConfigActionTagResourcesTagConditionArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionArrayInput)(nil)).Elem(), PreventionDiscoveryConfigActionTagResourcesTagConditionArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreInput)(nil)).Elem(), PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrInput)(nil)).Elem(), PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionTagInput)(nil)).Elem(), PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrInput)(nil)).Elem(), PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigErrorInput)(nil)).Elem(), PreventionDiscoveryConfigErrorArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigErrorArrayInput)(nil)).Elem(), PreventionDiscoveryConfigErrorArray{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigErrorDetailsInput)(nil)).Elem(), PreventionDiscoveryConfigErrorDetailsArgs{}) @@ -61552,6 +62489,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetPtrInput)(nil)).Elem(), PreventionDiscoveryConfigTargetBigQueryTargetArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetCadenceInput)(nil)).Elem(), PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetCadencePtrInput)(nil)).Elem(), PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceInput)(nil)).Elem(), PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrInput)(nil)).Elem(), PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceInput)(nil)).Elem(), PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadencePtrInput)(nil)).Elem(), PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadenceInput)(nil)).Elem(), PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadenceArgs{}) @@ -61596,6 +62535,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudSqlTargetFilterOthersPtrInput)(nil)).Elem(), PreventionDiscoveryConfigTargetCloudSqlTargetFilterOthersArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInput)(nil)).Elem(), PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadencePtrInput)(nil)).Elem(), PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceInput)(nil)).Elem(), PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrInput)(nil)).Elem(), PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceInput)(nil)).Elem(), PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadencePtrInput)(nil)).Elem(), PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*PreventionDiscoveryConfigTargetCloudStorageTargetInput)(nil)).Elem(), PreventionDiscoveryConfigTargetCloudStorageTargetArgs{}) @@ -62259,6 +63200,14 @@ func init() { pulumi.RegisterOutputType(PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsPtrOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArrayOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigActionTagResourcesOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigActionTagResourcesPtrOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigActionTagResourcesTagConditionOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigActionTagResourcesTagConditionArrayOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreOutput{}) + 
pulumi.RegisterOutputType(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScorePtrOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigActionTagResourcesTagConditionTagOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigActionTagResourcesTagConditionTagPtrOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigErrorOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigErrorArrayOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigErrorDetailsOutput{}) @@ -62273,6 +63222,8 @@ func init() { pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetBigQueryTargetPtrOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetBigQueryTargetCadenceOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetBigQueryTargetCadencePtrOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadencePtrOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadencePtrOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadenceOutput{}) @@ -62317,6 +63268,8 @@ func init() { pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetCloudSqlTargetFilterOthersPtrOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadencePtrOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceOutput{}) + pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadencePtrOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadencePtrOutput{}) pulumi.RegisterOutputType(PreventionDiscoveryConfigTargetCloudStorageTargetOutput{}) diff --git a/sdk/go/gcp/dataproc/pulumiTypes.go b/sdk/go/gcp/dataproc/pulumiTypes.go index acd6f83781..77f67d5fef 100644 --- a/sdk/go/gcp/dataproc/pulumiTypes.go +++ b/sdk/go/gcp/dataproc/pulumiTypes.go @@ -21722,7 +21722,7 @@ type WorkflowTemplatePlacementManagedClusterConfig struct { SecurityConfig *WorkflowTemplatePlacementManagedClusterConfigSecurityConfig `pulumi:"securityConfig"` // The config settings for software inside the cluster. SoftwareConfig *WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig `pulumi:"softwareConfig"` - // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). StagingBucket *string `pulumi:"stagingBucket"` // A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. TempBucket *string `pulumi:"tempBucket"` @@ -21768,7 +21768,7 @@ type WorkflowTemplatePlacementManagedClusterConfigArgs struct { SecurityConfig WorkflowTemplatePlacementManagedClusterConfigSecurityConfigPtrInput `pulumi:"securityConfig"` // The config settings for software inside the cluster. SoftwareConfig WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrInput `pulumi:"softwareConfig"` - // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + // A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). StagingBucket pulumi.StringPtrInput `pulumi:"stagingBucket"` // A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. TempBucket pulumi.StringPtrInput `pulumi:"tempBucket"` @@ -21939,7 +21939,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigOutput) SoftwareConfig() Wo }).(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) } -// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). +// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). func (o WorkflowTemplatePlacementManagedClusterConfigOutput) StagingBucket() pulumi.StringPtrOutput { return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfig) *string { return v.StagingBucket }).(pulumi.StringPtrOutput) } @@ -22102,7 +22102,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigPtrOutput) SoftwareConfig() }).(WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigPtrOutput) } -// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). +// A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). func (o WorkflowTemplatePlacementManagedClusterConfigPtrOutput) StagingBucket() pulumi.StringPtrOutput { return o.ApplyT(func(v *WorkflowTemplatePlacementManagedClusterConfig) *string { if v == nil { @@ -22573,7 +22573,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigEndpointConfigPtrOutput) Ht type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig struct { // If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internalIpOnly` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. InternalIpOnly *bool `pulumi:"internalIpOnly"` - // The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + // The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). 
Metadata map[string]string `pulumi:"metadata"` // The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `networkUri` nor `subnetworkUri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see /regions/global/default`*`default` Network *string `pulumi:"network"` @@ -22591,7 +22591,7 @@ type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig struct { ShieldedInstanceConfig *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfig `pulumi:"shieldedInstanceConfig"` // The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0` * `sub0` Subnetwork *string `pulumi:"subnetwork"` - // The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + // The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). Tags []string `pulumi:"tags"` // The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f` Zone *string `pulumi:"zone"` @@ -22611,7 +22611,7 @@ type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigInput interfac type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs struct { // If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internalIpOnly` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. InternalIpOnly pulumi.BoolPtrInput `pulumi:"internalIpOnly"` - // The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + // The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). Metadata pulumi.StringMapInput `pulumi:"metadata"` // The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `networkUri` nor `subnetworkUri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see /regions/global/default`*`default` Network pulumi.StringPtrInput `pulumi:"network"` @@ -22629,7 +22629,7 @@ type WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs struct { ShieldedInstanceConfig WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigPtrInput `pulumi:"shieldedInstanceConfig"` // The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0` * `sub0` Subnetwork pulumi.StringPtrInput `pulumi:"subnetwork"` - // The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + // The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). Tags pulumi.StringArrayInput `pulumi:"tags"` // The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f` Zone pulumi.StringPtrInput `pulumi:"zone"` @@ -22717,7 +22717,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) Int return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) *bool { return v.InternalIpOnly }).(pulumi.BoolPtrOutput) } -// The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). +// The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) Metadata() pulumi.StringMapOutput { return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) map[string]string { return v.Metadata @@ -22774,7 +22774,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) Sub return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) *string { return v.Subnetwork }).(pulumi.StringPtrOutput) } -// The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). +// The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigOutput) Tags() pulumi.StringArrayOutput { return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) []string { return v.Tags }).(pulumi.StringArrayOutput) } @@ -22818,7 +22818,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) }).(pulumi.BoolPtrOutput) } -// The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). +// The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) Metadata() pulumi.StringMapOutput { return o.ApplyT(func(v *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) map[string]string { if v == nil { @@ -22908,7 +22908,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) }).(pulumi.StringPtrOutput) } -// The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). 
+// The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). func (o WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigPtrOutput) Tags() pulumi.StringArrayOutput { return o.ApplyT(func(v *WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig) []string { if v == nil { @@ -23731,7 +23731,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedG type WorkflowTemplatePlacementManagedClusterConfigInitializationAction struct { // Required. Cloud Storage URI of executable file. ExecutableFile *string `pulumi:"executableFile"` - // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. ExecutionTimeout *string `pulumi:"executionTimeout"` } @@ -23749,7 +23749,7 @@ type WorkflowTemplatePlacementManagedClusterConfigInitializationActionInput inte type WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs struct { // Required. Cloud Storage URI of executable file. ExecutableFile pulumi.StringPtrInput `pulumi:"executableFile"` - // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + // Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. ExecutionTimeout pulumi.StringPtrInput `pulumi:"executionTimeout"` } @@ -23811,7 +23811,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput) }).(pulumi.StringPtrOutput) } -// Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. +// Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). 
Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. func (o WorkflowTemplatePlacementManagedClusterConfigInitializationActionOutput) ExecutionTimeout() pulumi.StringPtrOutput { return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigInitializationAction) *string { return v.ExecutionTimeout @@ -23839,13 +23839,13 @@ func (o WorkflowTemplatePlacementManagedClusterConfigInitializationActionArrayOu } type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig struct { - // The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). AutoDeleteTime *string `pulumi:"autoDeleteTime"` - // The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). AutoDeleteTtl *string `pulumi:"autoDeleteTtl"` - // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). IdleDeleteTtl *string `pulumi:"idleDeleteTtl"` - // Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). IdleStartTime *string `pulumi:"idleStartTime"` } @@ -23861,13 +23861,13 @@ type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigInput interface } type WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs struct { - // The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). AutoDeleteTime pulumi.StringPtrInput `pulumi:"autoDeleteTime"` - // The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. 
Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). AutoDeleteTtl pulumi.StringPtrInput `pulumi:"autoDeleteTtl"` - // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + // The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). IdleDeleteTtl pulumi.StringPtrInput `pulumi:"idleDeleteTtl"` - // Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + // Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). IdleStartTime pulumi.StringPtrInput `pulumi:"idleStartTime"` } @@ -23948,22 +23948,22 @@ func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) ToWo }).(WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) } -// The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). +// The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) AutoDeleteTime() pulumi.StringPtrOutput { return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *string { return v.AutoDeleteTime }).(pulumi.StringPtrOutput) } -// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). +// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) AutoDeleteTtl() pulumi.StringPtrOutput { return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *string { return v.AutoDeleteTtl }).(pulumi.StringPtrOutput) } -// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. 
Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). +// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) IdleDeleteTtl() pulumi.StringPtrOutput { return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *string { return v.IdleDeleteTtl }).(pulumi.StringPtrOutput) } -// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). +// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) IdleStartTime() pulumi.StringPtrOutput { return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *string { return v.IdleStartTime }).(pulumi.StringPtrOutput) } @@ -23992,7 +23992,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) E }).(WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigOutput) } -// The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). +// The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) AutoDeleteTime() pulumi.StringPtrOutput { return o.ApplyT(func(v *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *string { if v == nil { @@ -24002,7 +24002,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) A }).(pulumi.StringPtrOutput) } -// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). +// The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) AutoDeleteTtl() pulumi.StringPtrOutput { return o.ApplyT(func(v *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *string { if v == nil { @@ -24012,7 +24012,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) A }).(pulumi.StringPtrOutput) } -// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. 
Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). +// The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) IdleDeleteTtl() pulumi.StringPtrOutput { return o.ApplyT(func(v *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *string { if v == nil { @@ -24022,7 +24022,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) I }).(pulumi.StringPtrOutput) } -// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). +// Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). func (o WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigPtrOutput) IdleStartTime() pulumi.StringPtrOutput { return o.ApplyT(func(v *WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig) *string { if v == nil { @@ -24047,7 +24047,7 @@ type WorkflowTemplatePlacementManagedClusterConfigMasterConfig struct { MachineType *string `pulumi:"machineType"` // Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. ManagedGroupConfigs []WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig `pulumi:"managedGroupConfigs"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). MinCpuPlatform *string `pulumi:"minCpuPlatform"` // The number of VM instances in the instance group. For master instance groups, must be set to 1. NumInstances *int `pulumi:"numInstances"` @@ -24081,7 +24081,7 @@ type WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs struct { MachineType pulumi.StringPtrInput `pulumi:"machineType"` // Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. ManagedGroupConfigs WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayInput `pulumi:"managedGroupConfigs"` - // Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + // Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). MinCpuPlatform pulumi.StringPtrInput `pulumi:"minCpuPlatform"` // The number of VM instances in the instance group. For master instance groups, must be set to 1. 
NumInstances pulumi.IntPtrInput `pulumi:"numInstances"` @@ -24207,7 +24207,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) Managed }).(WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput) } -// Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). +// Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). func (o WorkflowTemplatePlacementManagedClusterConfigMasterConfigOutput) MinCpuPlatform() pulumi.StringPtrOutput { return o.ApplyT(func(v WorkflowTemplatePlacementManagedClusterConfigMasterConfig) *string { return v.MinCpuPlatform }).(pulumi.StringPtrOutput) } @@ -24316,7 +24316,7 @@ func (o WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) Mana }).(WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArrayOutput) } -// Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). +// Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). func (o WorkflowTemplatePlacementManagedClusterConfigMasterConfigPtrOutput) MinCpuPlatform() pulumi.StringPtrOutput { return o.ApplyT(func(v *WorkflowTemplatePlacementManagedClusterConfigMasterConfig) *string { if v == nil { diff --git a/sdk/go/gcp/datastream/pulumiTypes.go b/sdk/go/gcp/datastream/pulumiTypes.go index 08f26a3293..3e42fb9d00 100644 --- a/sdk/go/gcp/datastream/pulumiTypes.go +++ b/sdk/go/gcp/datastream/pulumiTypes.go @@ -10826,6 +10826,8 @@ func (o StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPo } type StreamSourceConfigSqlServerSourceConfig struct { + // CDC reader reads from change tables. + ChangeTables *StreamSourceConfigSqlServerSourceConfigChangeTables `pulumi:"changeTables"` // SQL Server objects to exclude from the stream. // Structure is documented below. ExcludeObjects *StreamSourceConfigSqlServerSourceConfigExcludeObjects `pulumi:"excludeObjects"` @@ -10836,6 +10838,8 @@ type StreamSourceConfigSqlServerSourceConfig struct { MaxConcurrentBackfillTasks *int `pulumi:"maxConcurrentBackfillTasks"` // Max concurrent CDC tasks. MaxConcurrentCdcTasks *int `pulumi:"maxConcurrentCdcTasks"` + // CDC reader reads from transaction logs. + TransactionLogs *StreamSourceConfigSqlServerSourceConfigTransactionLogs `pulumi:"transactionLogs"` } // StreamSourceConfigSqlServerSourceConfigInput is an input type that accepts StreamSourceConfigSqlServerSourceConfigArgs and StreamSourceConfigSqlServerSourceConfigOutput values. @@ -10850,6 +10854,8 @@ type StreamSourceConfigSqlServerSourceConfigInput interface { } type StreamSourceConfigSqlServerSourceConfigArgs struct { + // CDC reader reads from change tables. + ChangeTables StreamSourceConfigSqlServerSourceConfigChangeTablesPtrInput `pulumi:"changeTables"` // SQL Server objects to exclude from the stream. // Structure is documented below. ExcludeObjects StreamSourceConfigSqlServerSourceConfigExcludeObjectsPtrInput `pulumi:"excludeObjects"` @@ -10860,6 +10866,8 @@ type StreamSourceConfigSqlServerSourceConfigArgs struct { MaxConcurrentBackfillTasks pulumi.IntPtrInput `pulumi:"maxConcurrentBackfillTasks"` // Max concurrent CDC tasks. 
MaxConcurrentCdcTasks pulumi.IntPtrInput `pulumi:"maxConcurrentCdcTasks"` + // CDC reader reads from transaction logs. + TransactionLogs StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrInput `pulumi:"transactionLogs"` } func (StreamSourceConfigSqlServerSourceConfigArgs) ElementType() reflect.Type { @@ -10939,6 +10947,13 @@ func (o StreamSourceConfigSqlServerSourceConfigOutput) ToStreamSourceConfigSqlSe }).(StreamSourceConfigSqlServerSourceConfigPtrOutput) } +// CDC reader reads from change tables. +func (o StreamSourceConfigSqlServerSourceConfigOutput) ChangeTables() StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return o.ApplyT(func(v StreamSourceConfigSqlServerSourceConfig) *StreamSourceConfigSqlServerSourceConfigChangeTables { + return v.ChangeTables + }).(StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput) +} + // SQL Server objects to exclude from the stream. // Structure is documented below. func (o StreamSourceConfigSqlServerSourceConfigOutput) ExcludeObjects() StreamSourceConfigSqlServerSourceConfigExcludeObjectsPtrOutput { @@ -10965,6 +10980,13 @@ func (o StreamSourceConfigSqlServerSourceConfigOutput) MaxConcurrentCdcTasks() p return o.ApplyT(func(v StreamSourceConfigSqlServerSourceConfig) *int { return v.MaxConcurrentCdcTasks }).(pulumi.IntPtrOutput) } +// CDC reader reads from transaction logs. +func (o StreamSourceConfigSqlServerSourceConfigOutput) TransactionLogs() StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return o.ApplyT(func(v StreamSourceConfigSqlServerSourceConfig) *StreamSourceConfigSqlServerSourceConfigTransactionLogs { + return v.TransactionLogs + }).(StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput) +} + type StreamSourceConfigSqlServerSourceConfigPtrOutput struct{ *pulumi.OutputState } func (StreamSourceConfigSqlServerSourceConfigPtrOutput) ElementType() reflect.Type { @@ -10989,6 +11011,16 @@ func (o StreamSourceConfigSqlServerSourceConfigPtrOutput) Elem() StreamSourceCon }).(StreamSourceConfigSqlServerSourceConfigOutput) } +// CDC reader reads from change tables. +func (o StreamSourceConfigSqlServerSourceConfigPtrOutput) ChangeTables() StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return o.ApplyT(func(v *StreamSourceConfigSqlServerSourceConfig) *StreamSourceConfigSqlServerSourceConfigChangeTables { + if v == nil { + return nil + } + return v.ChangeTables + }).(StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput) +} + // SQL Server objects to exclude from the stream. // Structure is documented below. func (o StreamSourceConfigSqlServerSourceConfigPtrOutput) ExcludeObjects() StreamSourceConfigSqlServerSourceConfigExcludeObjectsPtrOutput { @@ -11031,6 +11063,134 @@ func (o StreamSourceConfigSqlServerSourceConfigPtrOutput) MaxConcurrentCdcTasks( }).(pulumi.IntPtrOutput) } +// CDC reader reads from transaction logs. 
+func (o StreamSourceConfigSqlServerSourceConfigPtrOutput) TransactionLogs() StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return o.ApplyT(func(v *StreamSourceConfigSqlServerSourceConfig) *StreamSourceConfigSqlServerSourceConfigTransactionLogs { + if v == nil { + return nil + } + return v.TransactionLogs + }).(StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput) +} + +type StreamSourceConfigSqlServerSourceConfigChangeTables struct { +} + +// StreamSourceConfigSqlServerSourceConfigChangeTablesInput is an input type that accepts StreamSourceConfigSqlServerSourceConfigChangeTablesArgs and StreamSourceConfigSqlServerSourceConfigChangeTablesOutput values. +// You can construct a concrete instance of `StreamSourceConfigSqlServerSourceConfigChangeTablesInput` via: +// +// StreamSourceConfigSqlServerSourceConfigChangeTablesArgs{...} +type StreamSourceConfigSqlServerSourceConfigChangeTablesInput interface { + pulumi.Input + + ToStreamSourceConfigSqlServerSourceConfigChangeTablesOutput() StreamSourceConfigSqlServerSourceConfigChangeTablesOutput + ToStreamSourceConfigSqlServerSourceConfigChangeTablesOutputWithContext(context.Context) StreamSourceConfigSqlServerSourceConfigChangeTablesOutput +} + +type StreamSourceConfigSqlServerSourceConfigChangeTablesArgs struct { +} + +func (StreamSourceConfigSqlServerSourceConfigChangeTablesArgs) ElementType() reflect.Type { + return reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigChangeTables)(nil)).Elem() +} + +func (i StreamSourceConfigSqlServerSourceConfigChangeTablesArgs) ToStreamSourceConfigSqlServerSourceConfigChangeTablesOutput() StreamSourceConfigSqlServerSourceConfigChangeTablesOutput { + return i.ToStreamSourceConfigSqlServerSourceConfigChangeTablesOutputWithContext(context.Background()) +} + +func (i StreamSourceConfigSqlServerSourceConfigChangeTablesArgs) ToStreamSourceConfigSqlServerSourceConfigChangeTablesOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigChangeTablesOutput { + return pulumi.ToOutputWithContext(ctx, i).(StreamSourceConfigSqlServerSourceConfigChangeTablesOutput) +} + +func (i StreamSourceConfigSqlServerSourceConfigChangeTablesArgs) ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput() StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return i.ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutputWithContext(context.Background()) +} + +func (i StreamSourceConfigSqlServerSourceConfigChangeTablesArgs) ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(StreamSourceConfigSqlServerSourceConfigChangeTablesOutput).ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutputWithContext(ctx) +} + +// StreamSourceConfigSqlServerSourceConfigChangeTablesPtrInput is an input type that accepts StreamSourceConfigSqlServerSourceConfigChangeTablesArgs, StreamSourceConfigSqlServerSourceConfigChangeTablesPtr and StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput values. 
+// You can construct a concrete instance of `StreamSourceConfigSqlServerSourceConfigChangeTablesPtrInput` via: +// +// StreamSourceConfigSqlServerSourceConfigChangeTablesArgs{...} +// +// or: +// +// nil +type StreamSourceConfigSqlServerSourceConfigChangeTablesPtrInput interface { + pulumi.Input + + ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput() StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput + ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutputWithContext(context.Context) StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput +} + +type streamSourceConfigSqlServerSourceConfigChangeTablesPtrType StreamSourceConfigSqlServerSourceConfigChangeTablesArgs + +func StreamSourceConfigSqlServerSourceConfigChangeTablesPtr(v *StreamSourceConfigSqlServerSourceConfigChangeTablesArgs) StreamSourceConfigSqlServerSourceConfigChangeTablesPtrInput { + return (*streamSourceConfigSqlServerSourceConfigChangeTablesPtrType)(v) +} + +func (*streamSourceConfigSqlServerSourceConfigChangeTablesPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**StreamSourceConfigSqlServerSourceConfigChangeTables)(nil)).Elem() +} + +func (i *streamSourceConfigSqlServerSourceConfigChangeTablesPtrType) ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput() StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return i.ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutputWithContext(context.Background()) +} + +func (i *streamSourceConfigSqlServerSourceConfigChangeTablesPtrType) ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput) +} + +type StreamSourceConfigSqlServerSourceConfigChangeTablesOutput struct{ *pulumi.OutputState } + +func (StreamSourceConfigSqlServerSourceConfigChangeTablesOutput) ElementType() reflect.Type { + return reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigChangeTables)(nil)).Elem() +} + +func (o StreamSourceConfigSqlServerSourceConfigChangeTablesOutput) ToStreamSourceConfigSqlServerSourceConfigChangeTablesOutput() StreamSourceConfigSqlServerSourceConfigChangeTablesOutput { + return o +} + +func (o StreamSourceConfigSqlServerSourceConfigChangeTablesOutput) ToStreamSourceConfigSqlServerSourceConfigChangeTablesOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigChangeTablesOutput { + return o +} + +func (o StreamSourceConfigSqlServerSourceConfigChangeTablesOutput) ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput() StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return o.ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutputWithContext(context.Background()) +} + +func (o StreamSourceConfigSqlServerSourceConfigChangeTablesOutput) ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v StreamSourceConfigSqlServerSourceConfigChangeTables) *StreamSourceConfigSqlServerSourceConfigChangeTables { + return &v + }).(StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput) +} + +type StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput struct{ *pulumi.OutputState } + +func (StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput) ElementType() reflect.Type { + return 
reflect.TypeOf((**StreamSourceConfigSqlServerSourceConfigChangeTables)(nil)).Elem() +} + +func (o StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput) ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput() StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return o +} + +func (o StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput) ToStreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput { + return o +} + +func (o StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput) Elem() StreamSourceConfigSqlServerSourceConfigChangeTablesOutput { + return o.ApplyT(func(v *StreamSourceConfigSqlServerSourceConfigChangeTables) StreamSourceConfigSqlServerSourceConfigChangeTables { + if v != nil { + return *v + } + var ret StreamSourceConfigSqlServerSourceConfigChangeTables + return ret + }).(StreamSourceConfigSqlServerSourceConfigChangeTablesOutput) +} + type StreamSourceConfigSqlServerSourceConfigExcludeObjects struct { // SQL Server schemas/databases in the database server // Structure is documented below. @@ -12147,6 +12307,124 @@ func (o StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnAr }).(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnOutput) } +type StreamSourceConfigSqlServerSourceConfigTransactionLogs struct { +} + +// StreamSourceConfigSqlServerSourceConfigTransactionLogsInput is an input type that accepts StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs and StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput values. +// You can construct a concrete instance of `StreamSourceConfigSqlServerSourceConfigTransactionLogsInput` via: +// +// StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs{...} +type StreamSourceConfigSqlServerSourceConfigTransactionLogsInput interface { + pulumi.Input + + ToStreamSourceConfigSqlServerSourceConfigTransactionLogsOutput() StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput + ToStreamSourceConfigSqlServerSourceConfigTransactionLogsOutputWithContext(context.Context) StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput +} + +type StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs struct { +} + +func (StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs) ElementType() reflect.Type { + return reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigTransactionLogs)(nil)).Elem() +} + +func (i StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsOutput() StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput { + return i.ToStreamSourceConfigSqlServerSourceConfigTransactionLogsOutputWithContext(context.Background()) +} + +func (i StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput { + return pulumi.ToOutputWithContext(ctx, i).(StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput) +} + +func (i StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput() StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return i.ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutputWithContext(context.Background()) +} + +func (i StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs) 
ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput).ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutputWithContext(ctx) +} + +// StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrInput is an input type that accepts StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs, StreamSourceConfigSqlServerSourceConfigTransactionLogsPtr and StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput values. +// You can construct a concrete instance of `StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrInput` via: +// +// StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs{...} +// +// or: +// +// nil +type StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrInput interface { + pulumi.Input + + ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput() StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput + ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutputWithContext(context.Context) StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput +} + +type streamSourceConfigSqlServerSourceConfigTransactionLogsPtrType StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs + +func StreamSourceConfigSqlServerSourceConfigTransactionLogsPtr(v *StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs) StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrInput { + return (*streamSourceConfigSqlServerSourceConfigTransactionLogsPtrType)(v) +} + +func (*streamSourceConfigSqlServerSourceConfigTransactionLogsPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**StreamSourceConfigSqlServerSourceConfigTransactionLogs)(nil)).Elem() +} + +func (i *streamSourceConfigSqlServerSourceConfigTransactionLogsPtrType) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput() StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return i.ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutputWithContext(context.Background()) +} + +func (i *streamSourceConfigSqlServerSourceConfigTransactionLogsPtrType) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput) +} + +type StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput struct{ *pulumi.OutputState } + +func (StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput) ElementType() reflect.Type { + return reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigTransactionLogs)(nil)).Elem() +} + +func (o StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsOutput() StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput { + return o +} + +func (o StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput { + return o +} + +func (o StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput() StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return 
o.ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutputWithContext(context.Background()) +} + +func (o StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v StreamSourceConfigSqlServerSourceConfigTransactionLogs) *StreamSourceConfigSqlServerSourceConfigTransactionLogs { + return &v + }).(StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput) +} + +type StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput struct{ *pulumi.OutputState } + +func (StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**StreamSourceConfigSqlServerSourceConfigTransactionLogs)(nil)).Elem() +} + +func (o StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput() StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return o +} + +func (o StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput) ToStreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutputWithContext(ctx context.Context) StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput { + return o +} + +func (o StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput) Elem() StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput { + return o.ApplyT(func(v *StreamSourceConfigSqlServerSourceConfigTransactionLogs) StreamSourceConfigSqlServerSourceConfigTransactionLogs { + if v != nil { + return *v + } + var ret StreamSourceConfigSqlServerSourceConfigTransactionLogs + return ret + }).(StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput) +} + func init() { pulumi.RegisterInputType(reflect.TypeOf((*ConnectionProfileBigqueryProfileInput)(nil)).Elem(), ConnectionProfileBigqueryProfileArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ConnectionProfileBigqueryProfilePtrInput)(nil)).Elem(), ConnectionProfileBigqueryProfileArgs{}) @@ -12288,6 +12566,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArrayInput)(nil)).Elem(), StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArray{}) pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigPtrInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigChangeTablesInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigChangeTablesArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigChangeTablesPtrInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigChangeTablesArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigExcludeObjectsInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigExcludeObjectsPtrInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgs{}) 
pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemaInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemaArgs{}) @@ -12304,6 +12584,8 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArrayInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArray{}) pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnArrayInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigTransactionLogsInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrInput)(nil)).Elem(), StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs{}) pulumi.RegisterOutputType(ConnectionProfileBigqueryProfileOutput{}) pulumi.RegisterOutputType(ConnectionProfileBigqueryProfilePtrOutput{}) pulumi.RegisterOutputType(ConnectionProfileForwardSshConnectivityOutput{}) @@ -12444,6 +12726,8 @@ func init() { pulumi.RegisterOutputType(StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArrayOutput{}) pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigOutput{}) pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigPtrOutput{}) + pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigChangeTablesOutput{}) + pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigChangeTablesPtrOutput{}) pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigExcludeObjectsOutput{}) pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigExcludeObjectsPtrOutput{}) pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemaOutput{}) @@ -12460,4 +12744,6 @@ func init() { pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArrayOutput{}) pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnOutput{}) pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnArrayOutput{}) + pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigTransactionLogsOutput{}) + pulumi.RegisterOutputType(StreamSourceConfigSqlServerSourceConfigTransactionLogsPtrOutput{}) } diff --git a/sdk/go/gcp/datastream/stream.go b/sdk/go/gcp/datastream/stream.go index 4625295951..4894d2c0e7 100644 --- a/sdk/go/gcp/datastream/stream.go +++ b/sdk/go/gcp/datastream/stream.go @@ -634,6 +634,138 @@ import ( // }, // }, // }, +// TransactionLogs: nil, +// }, +// }, +// DestinationConfig: &datastream.StreamDestinationConfigArgs{ +// DestinationConnectionProfile: destination.ID(), +// BigqueryDestinationConfig: &datastream.StreamDestinationConfigBigqueryDestinationConfigArgs{ +// DataFreshness: pulumi.String("900s"), +// SourceHierarchyDatasets: &datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs{ +// DatasetTemplate: 
&datastream.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs{ +// Location: pulumi.String("us-central1"), +// }, +// }, +// }, +// }, +// BackfillNone: nil, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// ### Datastream Stream Sql Server Change Tables +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/datastream" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/sql" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// instance, err := sql.NewDatabaseInstance(ctx, "instance", &sql.DatabaseInstanceArgs{ +// Name: pulumi.String("sql-server"), +// DatabaseVersion: pulumi.String("SQLSERVER_2019_STANDARD"), +// Region: pulumi.String("us-central1"), +// RootPassword: pulumi.String("root-password"), +// DeletionProtection: pulumi.Bool(true), +// Settings: &sql.DatabaseInstanceSettingsArgs{ +// Tier: pulumi.String("db-custom-2-4096"), +// IpConfiguration: &sql.DatabaseInstanceSettingsIpConfigurationArgs{ +// AuthorizedNetworks: sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArray{ +// &sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{ +// Value: pulumi.String("34.71.242.81"), +// }, +// &sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{ +// Value: pulumi.String("34.72.28.29"), +// }, +// &sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{ +// Value: pulumi.String("34.67.6.157"), +// }, +// &sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{ +// Value: pulumi.String("34.67.234.134"), +// }, +// &sql.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs{ +// Value: pulumi.String("34.72.239.218"), +// }, +// }, +// }, +// }, +// }) +// if err != nil { +// return err +// } +// user, err := sql.NewUser(ctx, "user", &sql.UserArgs{ +// Name: pulumi.String("user"), +// Instance: instance.Name, +// Password: pulumi.String("password"), +// }) +// if err != nil { +// return err +// } +// db, err := sql.NewDatabase(ctx, "db", &sql.DatabaseArgs{ +// Name: pulumi.String("db"), +// Instance: instance.Name, +// }, pulumi.DependsOn([]pulumi.Resource{ +// user, +// })) +// if err != nil { +// return err +// } +// source, err := datastream.NewConnectionProfile(ctx, "source", &datastream.ConnectionProfileArgs{ +// DisplayName: pulumi.String("SQL Server Source"), +// Location: pulumi.String("us-central1"), +// ConnectionProfileId: pulumi.String("source-profile"), +// SqlServerProfile: &datastream.ConnectionProfileSqlServerProfileArgs{ +// Hostname: instance.PublicIpAddress, +// Port: pulumi.Int(1433), +// Username: user.Name, +// Password: user.Password, +// Database: db.Name, +// }, +// }) +// if err != nil { +// return err +// } +// destination, err := datastream.NewConnectionProfile(ctx, "destination", &datastream.ConnectionProfileArgs{ +// DisplayName: pulumi.String("BigQuery Destination"), +// Location: pulumi.String("us-central1"), +// ConnectionProfileId: pulumi.String("destination-profile"), +// BigqueryProfile: nil, +// }) +// if err != nil { +// return err +// } +// _, err = datastream.NewStream(ctx, "default", &datastream.StreamArgs{ +// DisplayName: pulumi.String("SQL Server to BigQuery"), +// Location: pulumi.String("us-central1"), +// StreamId: pulumi.String("stream"), +// SourceConfig: &datastream.StreamSourceConfigArgs{ +// SourceConnectionProfile: source.ID(), +// SqlServerSourceConfig: 
&datastream.StreamSourceConfigSqlServerSourceConfigArgs{ +// IncludeObjects: &datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs{ +// Schemas: datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArray{ +// &datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs{ +// Schema: pulumi.String("schema"), +// Tables: datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArray{ +// &datastream.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs{ +// Table: pulumi.String("table"), +// }, +// }, +// }, +// }, +// }, +// ChangeTables: nil, // }, // }, // DestinationConfig: &datastream.StreamDestinationConfigArgs{ @@ -1104,7 +1236,8 @@ type Stream struct { // A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data will be // encrypted using an internal Stream-specific encryption key provisioned through KMS. CustomerManagedEncryptionKey pulumi.StringPtrOutput `pulumi:"customerManagedEncryptionKey"` - // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + // values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED DesiredState pulumi.StringPtrOutput `pulumi:"desiredState"` // Destination connection profile configuration. // Structure is documented below. @@ -1192,7 +1325,8 @@ type streamState struct { // A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data will be // encrypted using an internal Stream-specific encryption key provisioned through KMS. CustomerManagedEncryptionKey *string `pulumi:"customerManagedEncryptionKey"` - // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + // values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED DesiredState *string `pulumi:"desiredState"` // Destination connection profile configuration. // Structure is documented below. @@ -1231,7 +1365,8 @@ type StreamState struct { // A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data will be // encrypted using an internal Stream-specific encryption key provisioned through KMS. CustomerManagedEncryptionKey pulumi.StringPtrInput - // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + // values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED DesiredState pulumi.StringPtrInput // Destination connection profile configuration. // Structure is documented below. @@ -1274,7 +1409,8 @@ type streamArgs struct { // A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data will be // encrypted using an internal Stream-specific encryption key provisioned through KMS. CustomerManagedEncryptionKey *string `pulumi:"customerManagedEncryptionKey"` - // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. 
Possible + // values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED DesiredState *string `pulumi:"desiredState"` // Destination connection profile configuration. // Structure is documented below. @@ -1305,7 +1441,8 @@ type StreamArgs struct { // A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data will be // encrypted using an internal Stream-specific encryption key provisioned through KMS. CustomerManagedEncryptionKey pulumi.StringPtrInput - // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + // Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + // values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED DesiredState pulumi.StringPtrInput // Destination connection profile configuration. // Structure is documented below. @@ -1433,7 +1570,8 @@ func (o StreamOutput) CustomerManagedEncryptionKey() pulumi.StringPtrOutput { return o.ApplyT(func(v *Stream) pulumi.StringPtrOutput { return v.CustomerManagedEncryptionKey }).(pulumi.StringPtrOutput) } -// Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. +// Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible +// values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED func (o StreamOutput) DesiredState() pulumi.StringPtrOutput { return o.ApplyT(func(v *Stream) pulumi.StringPtrOutput { return v.DesiredState }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/gcp/discoveryengine/dataStore.go b/sdk/go/gcp/discoveryengine/dataStore.go index 94da3f4528..563a3ff847 100644 --- a/sdk/go/gcp/discoveryengine/dataStore.go +++ b/sdk/go/gcp/discoveryengine/dataStore.go @@ -153,7 +153,7 @@ type DataStore struct { // Structure is documented below. DocumentProcessingConfig DataStoreDocumentProcessingConfigPtrOutput `pulumi:"documentProcessingConfig"` // The industry vertical that the data store registers. - // Possible values are: `GENERIC`, `MEDIA`. + // Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. IndustryVertical pulumi.StringOutput `pulumi:"industryVertical"` // The geographic location where the data store should reside. The value can // only be one of "global", "us" and "eu". @@ -175,7 +175,7 @@ type DataStore struct { // specified. SkipDefaultSchemaCreation pulumi.BoolPtrOutput `pulumi:"skipDefaultSchemaCreation"` // The solutions that the data store enrolls. - // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. SolutionTypes pulumi.StringArrayOutput `pulumi:"solutionTypes"` } @@ -246,7 +246,7 @@ type dataStoreState struct { // Structure is documented below. DocumentProcessingConfig *DataStoreDocumentProcessingConfig `pulumi:"documentProcessingConfig"` // The industry vertical that the data store registers. - // Possible values are: `GENERIC`, `MEDIA`. + // Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. IndustryVertical *string `pulumi:"industryVertical"` // The geographic location where the data store should reside. The value can // only be one of "global", "us" and "eu". @@ -268,7 +268,7 @@ type dataStoreState struct { // specified. 
SkipDefaultSchemaCreation *bool `pulumi:"skipDefaultSchemaCreation"` // The solutions that the data store enrolls. - // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. SolutionTypes []string `pulumi:"solutionTypes"` } @@ -295,7 +295,7 @@ type DataStoreState struct { // Structure is documented below. DocumentProcessingConfig DataStoreDocumentProcessingConfigPtrInput // The industry vertical that the data store registers. - // Possible values are: `GENERIC`, `MEDIA`. + // Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. IndustryVertical pulumi.StringPtrInput // The geographic location where the data store should reside. The value can // only be one of "global", "us" and "eu". @@ -317,7 +317,7 @@ type DataStoreState struct { // specified. SkipDefaultSchemaCreation pulumi.BoolPtrInput // The solutions that the data store enrolls. - // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. SolutionTypes pulumi.StringArrayInput } @@ -344,7 +344,7 @@ type dataStoreArgs struct { // Structure is documented below. DocumentProcessingConfig *DataStoreDocumentProcessingConfig `pulumi:"documentProcessingConfig"` // The industry vertical that the data store registers. - // Possible values are: `GENERIC`, `MEDIA`. + // Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. IndustryVertical string `pulumi:"industryVertical"` // The geographic location where the data store should reside. The value can // only be one of "global", "us" and "eu". @@ -361,7 +361,7 @@ type dataStoreArgs struct { // specified. SkipDefaultSchemaCreation *bool `pulumi:"skipDefaultSchemaCreation"` // The solutions that the data store enrolls. - // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. SolutionTypes []string `pulumi:"solutionTypes"` } @@ -385,7 +385,7 @@ type DataStoreArgs struct { // Structure is documented below. DocumentProcessingConfig DataStoreDocumentProcessingConfigPtrInput // The industry vertical that the data store registers. - // Possible values are: `GENERIC`, `MEDIA`. + // Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. IndustryVertical pulumi.StringInput // The geographic location where the data store should reside. The value can // only be one of "global", "us" and "eu". @@ -402,7 +402,7 @@ type DataStoreArgs struct { // specified. SkipDefaultSchemaCreation pulumi.BoolPtrInput // The solutions that the data store enrolls. - // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + // Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. SolutionTypes pulumi.StringArrayInput } @@ -536,7 +536,7 @@ func (o DataStoreOutput) DocumentProcessingConfig() DataStoreDocumentProcessingC } // The industry vertical that the data store registers. -// Possible values are: `GENERIC`, `MEDIA`. +// Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. 
func (o DataStoreOutput) IndustryVertical() pulumi.StringOutput { return o.ApplyT(func(v *DataStore) pulumi.StringOutput { return v.IndustryVertical }).(pulumi.StringOutput) } @@ -573,7 +573,7 @@ func (o DataStoreOutput) SkipDefaultSchemaCreation() pulumi.BoolPtrOutput { } // The solutions that the data store enrolls. -// Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. +// Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. func (o DataStoreOutput) SolutionTypes() pulumi.StringArrayOutput { return o.ApplyT(func(v *DataStore) pulumi.StringArrayOutput { return v.SolutionTypes }).(pulumi.StringArrayOutput) } diff --git a/sdk/go/gcp/discoveryengine/pulumiTypes.go b/sdk/go/gcp/discoveryengine/pulumiTypes.go index 14553ecf06..abce7f3e8e 100644 --- a/sdk/go/gcp/discoveryengine/pulumiTypes.go +++ b/sdk/go/gcp/discoveryengine/pulumiTypes.go @@ -596,6 +596,9 @@ func (o ChatEngineCommonConfigPtrOutput) CompanyName() pulumi.StringPtrOutput { } type DataStoreDocumentProcessingConfig struct { + // Whether chunking mode is enabled. + // Structure is documented below. + ChunkingConfig *DataStoreDocumentProcessingConfigChunkingConfig `pulumi:"chunkingConfig"` // Configurations for default Document parser. If not specified, this resource // will be configured to use a default DigitalParsingConfig, and the default parsing // config will be applied to all file types for Document parsing. @@ -621,6 +624,9 @@ type DataStoreDocumentProcessingConfigInput interface { } type DataStoreDocumentProcessingConfigArgs struct { + // Whether chunking mode is enabled. + // Structure is documented below. + ChunkingConfig DataStoreDocumentProcessingConfigChunkingConfigPtrInput `pulumi:"chunkingConfig"` // Configurations for default Document parser. If not specified, this resource // will be configured to use a default DigitalParsingConfig, and the default parsing // config will be applied to all file types for Document parsing. @@ -711,6 +717,14 @@ func (o DataStoreDocumentProcessingConfigOutput) ToDataStoreDocumentProcessingCo }).(DataStoreDocumentProcessingConfigPtrOutput) } +// Whether chunking mode is enabled. +// Structure is documented below. +func (o DataStoreDocumentProcessingConfigOutput) ChunkingConfig() DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return o.ApplyT(func(v DataStoreDocumentProcessingConfig) *DataStoreDocumentProcessingConfigChunkingConfig { + return v.ChunkingConfig + }).(DataStoreDocumentProcessingConfigChunkingConfigPtrOutput) +} + // Configurations for default Document parser. If not specified, this resource // will be configured to use a default DigitalParsingConfig, and the default parsing // config will be applied to all file types for Document parsing. @@ -759,6 +773,17 @@ func (o DataStoreDocumentProcessingConfigPtrOutput) Elem() DataStoreDocumentProc }).(DataStoreDocumentProcessingConfigOutput) } +// Whether chunking mode is enabled. +// Structure is documented below. +func (o DataStoreDocumentProcessingConfigPtrOutput) ChunkingConfig() DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return o.ApplyT(func(v *DataStoreDocumentProcessingConfig) *DataStoreDocumentProcessingConfigChunkingConfig { + if v == nil { + return nil + } + return v.ChunkingConfig + }).(DataStoreDocumentProcessingConfigChunkingConfigPtrOutput) +} + // Configurations for default Document parser. 
If not specified, this resource // will be configured to use a default DigitalParsingConfig, and the default parsing // config will be applied to all file types for Document parsing. @@ -794,9 +819,322 @@ func (o DataStoreDocumentProcessingConfigPtrOutput) ParsingConfigOverrides() Dat }).(DataStoreDocumentProcessingConfigParsingConfigOverrideArrayOutput) } +type DataStoreDocumentProcessingConfigChunkingConfig struct { + // Configuration for the layout based chunking. + // Structure is documented below. + LayoutBasedChunkingConfig *DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig `pulumi:"layoutBasedChunkingConfig"` +} + +// DataStoreDocumentProcessingConfigChunkingConfigInput is an input type that accepts DataStoreDocumentProcessingConfigChunkingConfigArgs and DataStoreDocumentProcessingConfigChunkingConfigOutput values. +// You can construct a concrete instance of `DataStoreDocumentProcessingConfigChunkingConfigInput` via: +// +// DataStoreDocumentProcessingConfigChunkingConfigArgs{...} +type DataStoreDocumentProcessingConfigChunkingConfigInput interface { + pulumi.Input + + ToDataStoreDocumentProcessingConfigChunkingConfigOutput() DataStoreDocumentProcessingConfigChunkingConfigOutput + ToDataStoreDocumentProcessingConfigChunkingConfigOutputWithContext(context.Context) DataStoreDocumentProcessingConfigChunkingConfigOutput +} + +type DataStoreDocumentProcessingConfigChunkingConfigArgs struct { + // Configuration for the layout based chunking. + // Structure is documented below. + LayoutBasedChunkingConfig DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrInput `pulumi:"layoutBasedChunkingConfig"` +} + +func (DataStoreDocumentProcessingConfigChunkingConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DataStoreDocumentProcessingConfigChunkingConfig)(nil)).Elem() +} + +func (i DataStoreDocumentProcessingConfigChunkingConfigArgs) ToDataStoreDocumentProcessingConfigChunkingConfigOutput() DataStoreDocumentProcessingConfigChunkingConfigOutput { + return i.ToDataStoreDocumentProcessingConfigChunkingConfigOutputWithContext(context.Background()) +} + +func (i DataStoreDocumentProcessingConfigChunkingConfigArgs) ToDataStoreDocumentProcessingConfigChunkingConfigOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigChunkingConfigOutput) +} + +func (i DataStoreDocumentProcessingConfigChunkingConfigArgs) ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return i.ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutputWithContext(context.Background()) +} + +func (i DataStoreDocumentProcessingConfigChunkingConfigArgs) ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigChunkingConfigOutput).ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutputWithContext(ctx) +} + +// DataStoreDocumentProcessingConfigChunkingConfigPtrInput is an input type that accepts DataStoreDocumentProcessingConfigChunkingConfigArgs, DataStoreDocumentProcessingConfigChunkingConfigPtr and DataStoreDocumentProcessingConfigChunkingConfigPtrOutput values. 
+// You can construct a concrete instance of `DataStoreDocumentProcessingConfigChunkingConfigPtrInput` via: +// +// DataStoreDocumentProcessingConfigChunkingConfigArgs{...} +// +// or: +// +// nil +type DataStoreDocumentProcessingConfigChunkingConfigPtrInput interface { + pulumi.Input + + ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigPtrOutput + ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutputWithContext(context.Context) DataStoreDocumentProcessingConfigChunkingConfigPtrOutput +} + +type dataStoreDocumentProcessingConfigChunkingConfigPtrType DataStoreDocumentProcessingConfigChunkingConfigArgs + +func DataStoreDocumentProcessingConfigChunkingConfigPtr(v *DataStoreDocumentProcessingConfigChunkingConfigArgs) DataStoreDocumentProcessingConfigChunkingConfigPtrInput { + return (*dataStoreDocumentProcessingConfigChunkingConfigPtrType)(v) +} + +func (*dataStoreDocumentProcessingConfigChunkingConfigPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**DataStoreDocumentProcessingConfigChunkingConfig)(nil)).Elem() +} + +func (i *dataStoreDocumentProcessingConfigChunkingConfigPtrType) ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return i.ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutputWithContext(context.Background()) +} + +func (i *dataStoreDocumentProcessingConfigChunkingConfigPtrType) ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigChunkingConfigPtrOutput) +} + +type DataStoreDocumentProcessingConfigChunkingConfigOutput struct{ *pulumi.OutputState } + +func (DataStoreDocumentProcessingConfigChunkingConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DataStoreDocumentProcessingConfigChunkingConfig)(nil)).Elem() +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigOutput) ToDataStoreDocumentProcessingConfigChunkingConfigOutput() DataStoreDocumentProcessingConfigChunkingConfigOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigOutput) ToDataStoreDocumentProcessingConfigChunkingConfigOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigOutput) ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return o.ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutputWithContext(context.Background()) +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigOutput) ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v DataStoreDocumentProcessingConfigChunkingConfig) *DataStoreDocumentProcessingConfigChunkingConfig { + return &v + }).(DataStoreDocumentProcessingConfigChunkingConfigPtrOutput) +} + +// Configuration for the layout based chunking. +// Structure is documented below. 
+func (o DataStoreDocumentProcessingConfigChunkingConfigOutput) LayoutBasedChunkingConfig() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return o.ApplyT(func(v DataStoreDocumentProcessingConfigChunkingConfig) *DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig { + return v.LayoutBasedChunkingConfig + }).(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) +} + +type DataStoreDocumentProcessingConfigChunkingConfigPtrOutput struct{ *pulumi.OutputState } + +func (DataStoreDocumentProcessingConfigChunkingConfigPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DataStoreDocumentProcessingConfigChunkingConfig)(nil)).Elem() +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigPtrOutput) ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigPtrOutput) ToDataStoreDocumentProcessingConfigChunkingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigPtrOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigPtrOutput) Elem() DataStoreDocumentProcessingConfigChunkingConfigOutput { + return o.ApplyT(func(v *DataStoreDocumentProcessingConfigChunkingConfig) DataStoreDocumentProcessingConfigChunkingConfig { + if v != nil { + return *v + } + var ret DataStoreDocumentProcessingConfigChunkingConfig + return ret + }).(DataStoreDocumentProcessingConfigChunkingConfigOutput) +} + +// Configuration for the layout based chunking. +// Structure is documented below. +func (o DataStoreDocumentProcessingConfigChunkingConfigPtrOutput) LayoutBasedChunkingConfig() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return o.ApplyT(func(v *DataStoreDocumentProcessingConfigChunkingConfig) *DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig { + if v == nil { + return nil + } + return v.LayoutBasedChunkingConfig + }).(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) +} + +type DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig struct { + // The token size limit for each chunk. + // Supported values: 100-500 (inclusive). Default value: 500. + ChunkSize *int `pulumi:"chunkSize"` + // Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + // Default value: False. + IncludeAncestorHeadings *bool `pulumi:"includeAncestorHeadings"` +} + +// DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigInput is an input type that accepts DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs and DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput values. 
+// You can construct a concrete instance of `DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigInput` via: +// +// DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs{...} +type DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigInput interface { + pulumi.Input + + ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput + ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutputWithContext(context.Context) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput +} + +type DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs struct { + // The token size limit for each chunk. + // Supported values: 100-500 (inclusive). Default value: 500. + ChunkSize pulumi.IntPtrInput `pulumi:"chunkSize"` + // Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + // Default value: False. + IncludeAncestorHeadings pulumi.BoolPtrInput `pulumi:"includeAncestorHeadings"` +} + +func (DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig)(nil)).Elem() +} + +func (i DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput { + return i.ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutputWithContext(context.Background()) +} + +func (i DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput) +} + +func (i DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return i.ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutputWithContext(context.Background()) +} + +func (i DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput).ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutputWithContext(ctx) +} + +// DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrInput is an input type that accepts DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs, DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtr and DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput values. 
+// You can construct a concrete instance of `DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrInput` via: +// +// DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs{...} +// +// or: +// +// nil +type DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrInput interface { + pulumi.Input + + ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput + ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutputWithContext(context.Context) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput +} + +type dataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrType DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs + +func DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtr(v *DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrInput { + return (*dataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrType)(v) +} + +func (*dataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig)(nil)).Elem() +} + +func (i *dataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrType) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return i.ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutputWithContext(context.Background()) +} + +func (i *dataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrType) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) +} + +type DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput struct{ *pulumi.OutputState } + +func (DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig)(nil)).Elem() +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return 
o.ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutputWithContext(context.Background()) +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig) *DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig { + return &v + }).(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) +} + +// The token size limit for each chunk. +// Supported values: 100-500 (inclusive). Default value: 500. +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput) ChunkSize() pulumi.IntPtrOutput { + return o.ApplyT(func(v DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig) *int { + return v.ChunkSize + }).(pulumi.IntPtrOutput) +} + +// Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. +// Default value: False. +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput) IncludeAncestorHeadings() pulumi.BoolPtrOutput { + return o.ApplyT(func(v DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig) *bool { + return v.IncludeAncestorHeadings + }).(pulumi.BoolPtrOutput) +} + +type DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput struct{ *pulumi.OutputState } + +func (DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig)(nil)).Elem() +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) ToDataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) Elem() DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput { + return o.ApplyT(func(v *DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig) DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig { + if v != nil { + return *v + } + var ret DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig + return ret + }).(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput) +} + +// The token size limit for each chunk. +// Supported values: 100-500 (inclusive). Default value: 500. 
+func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) ChunkSize() pulumi.IntPtrOutput { + return o.ApplyT(func(v *DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig) *int { + if v == nil { + return nil + } + return v.ChunkSize + }).(pulumi.IntPtrOutput) +} + +// Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. +// Default value: False. +func (o DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput) IncludeAncestorHeadings() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig) *bool { + if v == nil { + return nil + } + return v.IncludeAncestorHeadings + }).(pulumi.BoolPtrOutput) +} + type DataStoreDocumentProcessingConfigDefaultParsingConfig struct { // Configurations applied to digital parser. DigitalParsingConfig *DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig `pulumi:"digitalParsingConfig"` + // Configurations applied to layout parser. + LayoutParsingConfig *DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig `pulumi:"layoutParsingConfig"` // Configurations applied to OCR parser. Currently it only applies to PDFs. // Structure is documented below. OcrParsingConfig *DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig `pulumi:"ocrParsingConfig"` @@ -816,6 +1154,8 @@ type DataStoreDocumentProcessingConfigDefaultParsingConfigInput interface { type DataStoreDocumentProcessingConfigDefaultParsingConfigArgs struct { // Configurations applied to digital parser. DigitalParsingConfig DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigPtrInput `pulumi:"digitalParsingConfig"` + // Configurations applied to layout parser. + LayoutParsingConfig DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrInput `pulumi:"layoutParsingConfig"` // Configurations applied to OCR parser. Currently it only applies to PDFs. // Structure is documented below. OcrParsingConfig DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigPtrInput `pulumi:"ocrParsingConfig"` @@ -905,6 +1245,13 @@ func (o DataStoreDocumentProcessingConfigDefaultParsingConfigOutput) DigitalPars }).(DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigPtrOutput) } +// Configurations applied to layout parser. +func (o DataStoreDocumentProcessingConfigDefaultParsingConfigOutput) LayoutParsingConfig() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return o.ApplyT(func(v DataStoreDocumentProcessingConfigDefaultParsingConfig) *DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig { + return v.LayoutParsingConfig + }).(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput) +} + // Configurations applied to OCR parser. Currently it only applies to PDFs. // Structure is documented below. func (o DataStoreDocumentProcessingConfigDefaultParsingConfigOutput) OcrParsingConfig() DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigPtrOutput { @@ -947,6 +1294,16 @@ func (o DataStoreDocumentProcessingConfigDefaultParsingConfigPtrOutput) DigitalP }).(DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigPtrOutput) } +// Configurations applied to layout parser. 
+func (o DataStoreDocumentProcessingConfigDefaultParsingConfigPtrOutput) LayoutParsingConfig() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return o.ApplyT(func(v *DataStoreDocumentProcessingConfigDefaultParsingConfig) *DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig { + if v == nil { + return nil + } + return v.LayoutParsingConfig + }).(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput) +} + // Configurations applied to OCR parser. Currently it only applies to PDFs. // Structure is documented below. func (o DataStoreDocumentProcessingConfigDefaultParsingConfigPtrOutput) OcrParsingConfig() DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigPtrOutput { @@ -1076,6 +1433,124 @@ func (o DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfi }).(DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigOutput) } +type DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig struct { +} + +// DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigInput is an input type that accepts DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs and DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput values. +// You can construct a concrete instance of `DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigInput` via: +// +// DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs{...} +type DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigInput interface { + pulumi.Input + + ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput + ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutputWithContext(context.Context) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput +} + +type DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs struct { +} + +func (DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig)(nil)).Elem() +} + +func (i DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput { + return i.ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutputWithContext(context.Background()) +} + +func (i DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput) +} + +func (i DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return i.ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutputWithContext(context.Background()) +} + +func (i 
DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput).ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutputWithContext(ctx) +} + +// DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrInput is an input type that accepts DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs, DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtr and DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput values. +// You can construct a concrete instance of `DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrInput` via: +// +// DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs{...} +// +// or: +// +// nil +type DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrInput interface { + pulumi.Input + + ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput + ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutputWithContext(context.Context) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput +} + +type dataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrType DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs + +func DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtr(v *DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrInput { + return (*dataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrType)(v) +} + +func (*dataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig)(nil)).Elem() +} + +func (i *dataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrType) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return i.ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutputWithContext(context.Background()) +} + +func (i *dataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrType) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput) +} + +type DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput struct{ *pulumi.OutputState } + +func (DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig)(nil)).Elem() +} + +func (o 
DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return o.ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutputWithContext(context.Background()) +} + +func (o DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig) *DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig { + return &v + }).(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput) +} + +type DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput struct{ *pulumi.OutputState } + +func (DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig)(nil)).Elem() +} + +func (o DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput) ToDataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput) Elem() DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput { + return o.ApplyT(func(v *DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig) DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig { + if v != nil { + return *v + } + var ret DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig + return ret + }).(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput) +} + type DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig struct { // If true, will use native text instead of OCR text on pages containing native text. UseNativeText *bool `pulumi:"useNativeText"` @@ -1220,6 +1695,8 @@ type DataStoreDocumentProcessingConfigParsingConfigOverride struct { DigitalParsingConfig *DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig `pulumi:"digitalParsingConfig"` // The identifier for this object. 
Format specified above. FileType string `pulumi:"fileType"` + // Configurations applied to layout parser. + LayoutParsingConfig *DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig `pulumi:"layoutParsingConfig"` // Configurations applied to OCR parser. Currently it only applies to PDFs. // Structure is documented below. OcrParsingConfig *DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig `pulumi:"ocrParsingConfig"` @@ -1241,6 +1718,8 @@ type DataStoreDocumentProcessingConfigParsingConfigOverrideArgs struct { DigitalParsingConfig DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigPtrInput `pulumi:"digitalParsingConfig"` // The identifier for this object. Format specified above. FileType pulumi.StringInput `pulumi:"fileType"` + // Configurations applied to layout parser. + LayoutParsingConfig DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrInput `pulumi:"layoutParsingConfig"` // Configurations applied to OCR parser. Currently it only applies to PDFs. // Structure is documented below. OcrParsingConfig DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigPtrInput `pulumi:"ocrParsingConfig"` @@ -1309,6 +1788,13 @@ func (o DataStoreDocumentProcessingConfigParsingConfigOverrideOutput) FileType() return o.ApplyT(func(v DataStoreDocumentProcessingConfigParsingConfigOverride) string { return v.FileType }).(pulumi.StringOutput) } +// Configurations applied to layout parser. +func (o DataStoreDocumentProcessingConfigParsingConfigOverrideOutput) LayoutParsingConfig() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput { + return o.ApplyT(func(v DataStoreDocumentProcessingConfigParsingConfigOverride) *DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig { + return v.LayoutParsingConfig + }).(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput) +} + // Configurations applied to OCR parser. Currently it only applies to PDFs. // Structure is documented below. func (o DataStoreDocumentProcessingConfigParsingConfigOverrideOutput) OcrParsingConfig() DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigPtrOutput { @@ -1455,6 +1941,124 @@ func (o DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConf }).(DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigOutput) } +type DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig struct { +} + +// DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigInput is an input type that accepts DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs and DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput values. 
+// You can construct a concrete instance of `DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigInput` via: +// +// DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs{...} +type DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigInput interface { + pulumi.Input + + ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput + ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutputWithContext(context.Context) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput +} + +type DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs struct { +} + +func (DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs) ElementType() reflect.Type { + return reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig)(nil)).Elem() +} + +func (i DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput { + return i.ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutputWithContext(context.Background()) +} + +func (i DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput) +} + +func (i DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput { + return i.ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutputWithContext(context.Background()) +} + +func (i DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput).ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutputWithContext(ctx) +} + +// DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrInput is an input type that accepts DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs, DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtr and DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput values. 
+// You can construct a concrete instance of `DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrInput` via: +// +// DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs{...} +// +// or: +// +// nil +type DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrInput interface { + pulumi.Input + + ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput + ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutputWithContext(context.Context) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput +} + +type dataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrType DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs + +func DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtr(v *DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrInput { + return (*dataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrType)(v) +} + +func (*dataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig)(nil)).Elem() +} + +func (i *dataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrType) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput { + return i.ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutputWithContext(context.Background()) +} + +func (i *dataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrType) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput) +} + +type DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput struct{ *pulumi.OutputState } + +func (DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput) ElementType() reflect.Type { + return reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig)(nil)).Elem() +} + +func (o DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput { + return 
o.ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutputWithContext(context.Background()) +} + +func (o DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig) *DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig { + return &v + }).(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput) +} + +type DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput struct{ *pulumi.OutputState } + +func (DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig)(nil)).Elem() +} + +func (o DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput) ToDataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutputWithContext(ctx context.Context) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput { + return o +} + +func (o DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput) Elem() DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput { + return o.ApplyT(func(v *DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig) DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig { + if v != nil { + return *v + } + var ret DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig + return ret + }).(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput) +} + type DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig struct { // If true, will use native text instead of OCR text on pages containing native text. 
UseNativeText *bool `pulumi:"useNativeText"` @@ -1918,16 +2522,24 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*ChatEngineCommonConfigPtrInput)(nil)).Elem(), ChatEngineCommonConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigChunkingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigChunkingConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigChunkingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigChunkingConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigDefaultParsingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigDefaultParsingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideInput)(nil)).Elem(), DataStoreDocumentProcessingConfigParsingConfigOverrideArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideArrayInput)(nil)).Elem(), DataStoreDocumentProcessingConfigParsingConfigOverrideArray{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigArgs{}) 
pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigInput)(nil)).Elem(), DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigPtrInput)(nil)).Elem(), DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*SearchEngineCommonConfigInput)(nil)).Elem(), SearchEngineCommonConfigArgs{}) @@ -1944,16 +2556,24 @@ func init() { pulumi.RegisterOutputType(ChatEngineCommonConfigPtrOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigPtrOutput{}) + pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigChunkingConfigOutput{}) + pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigChunkingConfigPtrOutput{}) + pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigOutput{}) + pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigPtrOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigDefaultParsingConfigOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigDefaultParsingConfigPtrOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigPtrOutput{}) + pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigOutput{}) + pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigPtrOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigPtrOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigParsingConfigOverrideOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigParsingConfigOverrideArrayOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigPtrOutput{}) + pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigOutput{}) + pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigPtrOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigOutput{}) pulumi.RegisterOutputType(DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigPtrOutput{}) 
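+// The registrations above wire up the new chunkingConfig and layoutParsingConfig helper
+// types added in this upgrade. As a minimal, hypothetical sketch (not generated code),
+// a caller inside this package could opt the default parser into layout parsing roughly
+// as follows; the DefaultParsingConfig and LayoutParsingConfig field names are assumed
+// to mirror the override structs defined earlier in this file and are not taken from
+// this diff:
+//
+//	docProcessing := DataStoreDocumentProcessingConfigArgs{
+//		DefaultParsingConfig: &DataStoreDocumentProcessingConfigDefaultParsingConfigArgs{
+//			// Setting the (currently empty) layout parsing block selects the layout parser.
+//			LayoutParsingConfig: &DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs{},
+//		},
+//	}
+//
+// with docProcessing then supplied as the data store's documentProcessingConfig input.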
pulumi.RegisterOutputType(SearchEngineCommonConfigOutput{}) diff --git a/sdk/go/gcp/firebase/databaseInstance.go b/sdk/go/gcp/firebase/databaseInstance.go index 7a7cc76608..04b48b9777 100644 --- a/sdk/go/gcp/firebase/databaseInstance.go +++ b/sdk/go/gcp/firebase/databaseInstance.go @@ -163,7 +163,7 @@ type DatabaseInstance struct { // The database URL in the form of https://{instance-id}.firebaseio.com for us-central1 instances // or https://{instance-id}.{region}.firebasedatabase.app in other regions. DatabaseUrl pulumi.StringOutput `pulumi:"databaseUrl"` - // The intended database state. + // The intended database state. Possible values: ACTIVE, DISABLED. DesiredState pulumi.StringPtrOutput `pulumi:"desiredState"` // The globally unique identifier of the Firebase Realtime Database instance. // Instance IDs cannot be reused after deletion. @@ -231,7 +231,7 @@ type databaseInstanceState struct { // The database URL in the form of https://{instance-id}.firebaseio.com for us-central1 instances // or https://{instance-id}.{region}.firebasedatabase.app in other regions. DatabaseUrl *string `pulumi:"databaseUrl"` - // The intended database state. + // The intended database state. Possible values: ACTIVE, DISABLED. DesiredState *string `pulumi:"desiredState"` // The globally unique identifier of the Firebase Realtime Database instance. // Instance IDs cannot be reused after deletion. @@ -264,7 +264,7 @@ type DatabaseInstanceState struct { // The database URL in the form of https://{instance-id}.firebaseio.com for us-central1 instances // or https://{instance-id}.{region}.firebasedatabase.app in other regions. DatabaseUrl pulumi.StringPtrInput - // The intended database state. + // The intended database state. Possible values: ACTIVE, DISABLED. DesiredState pulumi.StringPtrInput // The globally unique identifier of the Firebase Realtime Database instance. // Instance IDs cannot be reused after deletion. @@ -298,7 +298,7 @@ func (DatabaseInstanceState) ElementType() reflect.Type { } type databaseInstanceArgs struct { - // The intended database state. + // The intended database state. Possible values: ACTIVE, DISABLED. DesiredState *string `pulumi:"desiredState"` // The globally unique identifier of the Firebase Realtime Database instance. // Instance IDs cannot be reused after deletion. @@ -322,7 +322,7 @@ type databaseInstanceArgs struct { // The set of arguments for constructing a DatabaseInstance resource. type DatabaseInstanceArgs struct { - // The intended database state. + // The intended database state. Possible values: ACTIVE, DISABLED. DesiredState pulumi.StringPtrInput // The globally unique identifier of the Firebase Realtime Database instance. // Instance IDs cannot be reused after deletion. @@ -437,7 +437,7 @@ func (o DatabaseInstanceOutput) DatabaseUrl() pulumi.StringOutput { return o.ApplyT(func(v *DatabaseInstance) pulumi.StringOutput { return v.DatabaseUrl }).(pulumi.StringOutput) } -// The intended database state. +// The intended database state. Possible values: ACTIVE, DISABLED. 
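+// A small illustrative sketch of reading this back in a program (the instance variable
+// is hypothetical, a DatabaseInstance created elsewhere):
+//
+//	ctx.Export("desiredState", instance.DesiredState)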
func (o DatabaseInstanceOutput) DesiredState() pulumi.StringPtrOutput { return o.ApplyT(func(v *DatabaseInstance) pulumi.StringPtrOutput { return v.DesiredState }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/gcp/gkehub/featureMembership.go b/sdk/go/gcp/gkehub/featureMembership.go index 2ccfd028cf..f5bfb6ee70 100644 --- a/sdk/go/gcp/gkehub/featureMembership.go +++ b/sdk/go/gcp/gkehub/featureMembership.go @@ -69,8 +69,9 @@ import ( // Feature: feature.Name, // Membership: membership.MembershipId, // Configmanagement: &gkehub.FeatureMembershipConfigmanagementArgs{ -// Version: pulumi.String("1.6.2"), +// Version: pulumi.String("1.19.0"), // ConfigSync: &gkehub.FeatureMembershipConfigmanagementConfigSyncArgs{ +// Enabled: pulumi.Bool(true), // Git: &gkehub.FeatureMembershipConfigmanagementConfigSyncGitArgs{ // SyncRepo: pulumi.String("https://github.com/hashicorp/terraform"), // }, @@ -138,8 +139,9 @@ import ( // Feature: feature.Name, // Membership: membership.MembershipId, // Configmanagement: &gkehub.FeatureMembershipConfigmanagementArgs{ -// Version: pulumi.String("1.15.1"), +// Version: pulumi.String("1.19.0"), // ConfigSync: &gkehub.FeatureMembershipConfigmanagementConfigSyncArgs{ +// Enabled: pulumi.Bool(true), // Oci: &gkehub.FeatureMembershipConfigmanagementConfigSyncOciArgs{ // SyncRepo: pulumi.String("us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest"), // PolicyDir: pulumi.String("config-connector"), @@ -306,8 +308,9 @@ import ( // Membership: membership.MembershipId, // MembershipLocation: membership.Location, // Configmanagement: &gkehub.FeatureMembershipConfigmanagementArgs{ -// Version: pulumi.String("1.6.2"), +// Version: pulumi.String("1.19.0"), // ConfigSync: &gkehub.FeatureMembershipConfigmanagementConfigSyncArgs{ +// Enabled: pulumi.Bool(true), // Git: &gkehub.FeatureMembershipConfigmanagementConfigSyncGitArgs{ // SyncRepo: pulumi.String("https://github.com/hashicorp/terraform"), // }, diff --git a/sdk/go/gcp/gkehub/pulumiTypes.go b/sdk/go/gcp/gkehub/pulumiTypes.go index f34bfafcb8..221ffd7a6c 100644 --- a/sdk/go/gcp/gkehub/pulumiTypes.go +++ b/sdk/go/gcp/gkehub/pulumiTypes.go @@ -3450,15 +3450,23 @@ func (o FeatureIamMemberConditionPtrOutput) Title() pulumi.StringPtrOutput { } type FeatureMembershipConfigmanagement struct { + // (Optional, Deprecated) // Binauthz configuration for the cluster. Structure is documented below. + // This field will be ignored and should not be set. Binauthz *FeatureMembershipConfigmanagementBinauthz `pulumi:"binauthz"` // Config Sync configuration for the cluster. Structure is documented below. ConfigSync *FeatureMembershipConfigmanagementConfigSync `pulumi:"configSync"` // Hierarchy Controller configuration for the cluster. Structure is documented below. + // Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + // Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + // Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + // to migrate from Hierarchy Controller to HNC. HierarchyController *FeatureMembershipConfigmanagementHierarchyController `pulumi:"hierarchyController"` // Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. 
Management *string `pulumi:"management"` // Policy Controller configuration for the cluster. Structure is documented below. + // Configuring Policy Controller through the configmanagement feature is no longer recommended. + // Use the policycontroller feature instead. PolicyController *FeatureMembershipConfigmanagementPolicyController `pulumi:"policyController"` // Version of ACM installed. Version *string `pulumi:"version"` @@ -3476,15 +3484,23 @@ type FeatureMembershipConfigmanagementInput interface { } type FeatureMembershipConfigmanagementArgs struct { + // (Optional, Deprecated) // Binauthz configuration for the cluster. Structure is documented below. + // This field will be ignored and should not be set. Binauthz FeatureMembershipConfigmanagementBinauthzPtrInput `pulumi:"binauthz"` // Config Sync configuration for the cluster. Structure is documented below. ConfigSync FeatureMembershipConfigmanagementConfigSyncPtrInput `pulumi:"configSync"` // Hierarchy Controller configuration for the cluster. Structure is documented below. + // Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + // Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + // Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + // to migrate from Hierarchy Controller to HNC. HierarchyController FeatureMembershipConfigmanagementHierarchyControllerPtrInput `pulumi:"hierarchyController"` // Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. Management pulumi.StringPtrInput `pulumi:"management"` // Policy Controller configuration for the cluster. Structure is documented below. + // Configuring Policy Controller through the configmanagement feature is no longer recommended. + // Use the policycontroller feature instead. PolicyController FeatureMembershipConfigmanagementPolicyControllerPtrInput `pulumi:"policyController"` // Version of ACM installed. Version pulumi.StringPtrInput `pulumi:"version"` @@ -3567,7 +3583,9 @@ func (o FeatureMembershipConfigmanagementOutput) ToFeatureMembershipConfigmanage }).(FeatureMembershipConfigmanagementPtrOutput) } +// (Optional, Deprecated) // Binauthz configuration for the cluster. Structure is documented below. +// This field will be ignored and should not be set. func (o FeatureMembershipConfigmanagementOutput) Binauthz() FeatureMembershipConfigmanagementBinauthzPtrOutput { return o.ApplyT(func(v FeatureMembershipConfigmanagement) *FeatureMembershipConfigmanagementBinauthz { return v.Binauthz @@ -3582,6 +3600,10 @@ func (o FeatureMembershipConfigmanagementOutput) ConfigSync() FeatureMembershipC } // Hierarchy Controller configuration for the cluster. Structure is documented below. +// Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. +// Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. +// Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) +// to migrate from Hierarchy Controller to HNC. 
func (o FeatureMembershipConfigmanagementOutput) HierarchyController() FeatureMembershipConfigmanagementHierarchyControllerPtrOutput { return o.ApplyT(func(v FeatureMembershipConfigmanagement) *FeatureMembershipConfigmanagementHierarchyController { return v.HierarchyController @@ -3594,6 +3616,8 @@ func (o FeatureMembershipConfigmanagementOutput) Management() pulumi.StringPtrOu } // Policy Controller configuration for the cluster. Structure is documented below. +// Configuring Policy Controller through the configmanagement feature is no longer recommended. +// Use the policycontroller feature instead. func (o FeatureMembershipConfigmanagementOutput) PolicyController() FeatureMembershipConfigmanagementPolicyControllerPtrOutput { return o.ApplyT(func(v FeatureMembershipConfigmanagement) *FeatureMembershipConfigmanagementPolicyController { return v.PolicyController @@ -3629,7 +3653,9 @@ func (o FeatureMembershipConfigmanagementPtrOutput) Elem() FeatureMembershipConf }).(FeatureMembershipConfigmanagementOutput) } +// (Optional, Deprecated) // Binauthz configuration for the cluster. Structure is documented below. +// This field will be ignored and should not be set. func (o FeatureMembershipConfigmanagementPtrOutput) Binauthz() FeatureMembershipConfigmanagementBinauthzPtrOutput { return o.ApplyT(func(v *FeatureMembershipConfigmanagement) *FeatureMembershipConfigmanagementBinauthz { if v == nil { @@ -3650,6 +3676,10 @@ func (o FeatureMembershipConfigmanagementPtrOutput) ConfigSync() FeatureMembersh } // Hierarchy Controller configuration for the cluster. Structure is documented below. +// Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. +// Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. +// Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) +// to migrate from Hierarchy Controller to HNC. func (o FeatureMembershipConfigmanagementPtrOutput) HierarchyController() FeatureMembershipConfigmanagementHierarchyControllerPtrOutput { return o.ApplyT(func(v *FeatureMembershipConfigmanagement) *FeatureMembershipConfigmanagementHierarchyController { if v == nil { @@ -3670,6 +3700,8 @@ func (o FeatureMembershipConfigmanagementPtrOutput) Management() pulumi.StringPt } // Policy Controller configuration for the cluster. Structure is documented below. +// Configuring Policy Controller through the configmanagement feature is no longer recommended. +// Use the policycontroller feature instead. 
func (o FeatureMembershipConfigmanagementPtrOutput) PolicyController() FeatureMembershipConfigmanagementPolicyControllerPtrOutput { return o.ApplyT(func(v *FeatureMembershipConfigmanagement) *FeatureMembershipConfigmanagementPolicyController { if v == nil { diff --git a/sdk/go/gcp/iam/getWorkloadIdentityPoolProvider.go b/sdk/go/gcp/iam/getWorkloadIdentityPoolProvider.go index 125f703c3b..665d3a5536 100644 --- a/sdk/go/gcp/iam/getWorkloadIdentityPoolProvider.go +++ b/sdk/go/gcp/iam/getWorkloadIdentityPoolProvider.go @@ -81,6 +81,7 @@ type LookupWorkloadIdentityPoolProviderResult struct { State string `pulumi:"state"` WorkloadIdentityPoolId string `pulumi:"workloadIdentityPoolId"` WorkloadIdentityPoolProviderId string `pulumi:"workloadIdentityPoolProviderId"` + X509s []GetWorkloadIdentityPoolProviderX509 `pulumi:"x509s"` } func LookupWorkloadIdentityPoolProviderOutput(ctx *pulumi.Context, args LookupWorkloadIdentityPoolProviderOutputArgs, opts ...pulumi.InvokeOption) LookupWorkloadIdentityPoolProviderResultOutput { @@ -187,6 +188,10 @@ func (o LookupWorkloadIdentityPoolProviderResultOutput) WorkloadIdentityPoolProv return o.ApplyT(func(v LookupWorkloadIdentityPoolProviderResult) string { return v.WorkloadIdentityPoolProviderId }).(pulumi.StringOutput) } +func (o LookupWorkloadIdentityPoolProviderResultOutput) X509s() GetWorkloadIdentityPoolProviderX509ArrayOutput { + return o.ApplyT(func(v LookupWorkloadIdentityPoolProviderResult) []GetWorkloadIdentityPoolProviderX509 { return v.X509s }).(GetWorkloadIdentityPoolProviderX509ArrayOutput) +} + func init() { pulumi.RegisterOutputType(LookupWorkloadIdentityPoolProviderResultOutput{}) } diff --git a/sdk/go/gcp/iam/pulumiTypes.go b/sdk/go/gcp/iam/pulumiTypes.go index 2204a203bd..48942ec1e0 100644 --- a/sdk/go/gcp/iam/pulumiTypes.go +++ b/sdk/go/gcp/iam/pulumiTypes.go @@ -3328,6 +3328,8 @@ func (o WorkloadIdentityPoolProviderOidcPtrOutput) JwksJson() pulumi.StringPtrOu type WorkloadIdentityPoolProviderSaml struct { // SAML Identity provider configuration metadata xml doc. + // + // The `x509` block supports: IdpMetadataXml string `pulumi:"idpMetadataXml"` } @@ -3344,6 +3346,8 @@ type WorkloadIdentityPoolProviderSamlInput interface { type WorkloadIdentityPoolProviderSamlArgs struct { // SAML Identity provider configuration metadata xml doc. + // + // The `x509` block supports: IdpMetadataXml pulumi.StringInput `pulumi:"idpMetadataXml"` } @@ -3425,6 +3429,8 @@ func (o WorkloadIdentityPoolProviderSamlOutput) ToWorkloadIdentityPoolProviderSa } // SAML Identity provider configuration metadata xml doc. +// +// The `x509` block supports: func (o WorkloadIdentityPoolProviderSamlOutput) IdpMetadataXml() pulumi.StringOutput { return o.ApplyT(func(v WorkloadIdentityPoolProviderSaml) string { return v.IdpMetadataXml }).(pulumi.StringOutput) } @@ -3454,6 +3460,8 @@ func (o WorkloadIdentityPoolProviderSamlPtrOutput) Elem() WorkloadIdentityPoolPr } // SAML Identity provider configuration metadata xml doc. 
+// +// The `x509` block supports: func (o WorkloadIdentityPoolProviderSamlPtrOutput) IdpMetadataXml() pulumi.StringPtrOutput { return o.ApplyT(func(v *WorkloadIdentityPoolProviderSaml) *string { if v == nil { @@ -3463,6 +3471,545 @@ func (o WorkloadIdentityPoolProviderSamlPtrOutput) IdpMetadataXml() pulumi.Strin }).(pulumi.StringPtrOutput) } +type WorkloadIdentityPoolProviderX509 struct { + // A Trust store, use this trust store as a wrapper to config the trust + // anchor and optional intermediate cas to help build the trust chain for + // the incoming end entity certificate. Follow the x509 guidelines to + // define those PEM encoded certs. Only 1 trust store is currently + // supported. + TrustStore WorkloadIdentityPoolProviderX509TrustStore `pulumi:"trustStore"` +} + +// WorkloadIdentityPoolProviderX509Input is an input type that accepts WorkloadIdentityPoolProviderX509Args and WorkloadIdentityPoolProviderX509Output values. +// You can construct a concrete instance of `WorkloadIdentityPoolProviderX509Input` via: +// +// WorkloadIdentityPoolProviderX509Args{...} +type WorkloadIdentityPoolProviderX509Input interface { + pulumi.Input + + ToWorkloadIdentityPoolProviderX509Output() WorkloadIdentityPoolProviderX509Output + ToWorkloadIdentityPoolProviderX509OutputWithContext(context.Context) WorkloadIdentityPoolProviderX509Output +} + +type WorkloadIdentityPoolProviderX509Args struct { + // A Trust store, use this trust store as a wrapper to config the trust + // anchor and optional intermediate cas to help build the trust chain for + // the incoming end entity certificate. Follow the x509 guidelines to + // define those PEM encoded certs. Only 1 trust store is currently + // supported. + TrustStore WorkloadIdentityPoolProviderX509TrustStoreInput `pulumi:"trustStore"` +} + +func (WorkloadIdentityPoolProviderX509Args) ElementType() reflect.Type { + return reflect.TypeOf((*WorkloadIdentityPoolProviderX509)(nil)).Elem() +} + +func (i WorkloadIdentityPoolProviderX509Args) ToWorkloadIdentityPoolProviderX509Output() WorkloadIdentityPoolProviderX509Output { + return i.ToWorkloadIdentityPoolProviderX509OutputWithContext(context.Background()) +} + +func (i WorkloadIdentityPoolProviderX509Args) ToWorkloadIdentityPoolProviderX509OutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509Output { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509Output) +} + +func (i WorkloadIdentityPoolProviderX509Args) ToWorkloadIdentityPoolProviderX509PtrOutput() WorkloadIdentityPoolProviderX509PtrOutput { + return i.ToWorkloadIdentityPoolProviderX509PtrOutputWithContext(context.Background()) +} + +func (i WorkloadIdentityPoolProviderX509Args) ToWorkloadIdentityPoolProviderX509PtrOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509PtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509Output).ToWorkloadIdentityPoolProviderX509PtrOutputWithContext(ctx) +} + +// WorkloadIdentityPoolProviderX509PtrInput is an input type that accepts WorkloadIdentityPoolProviderX509Args, WorkloadIdentityPoolProviderX509Ptr and WorkloadIdentityPoolProviderX509PtrOutput values. 
+// You can construct a concrete instance of `WorkloadIdentityPoolProviderX509PtrInput` via: +// +// WorkloadIdentityPoolProviderX509Args{...} +// +// or: +// +// nil +type WorkloadIdentityPoolProviderX509PtrInput interface { + pulumi.Input + + ToWorkloadIdentityPoolProviderX509PtrOutput() WorkloadIdentityPoolProviderX509PtrOutput + ToWorkloadIdentityPoolProviderX509PtrOutputWithContext(context.Context) WorkloadIdentityPoolProviderX509PtrOutput +} + +type workloadIdentityPoolProviderX509PtrType WorkloadIdentityPoolProviderX509Args + +func WorkloadIdentityPoolProviderX509Ptr(v *WorkloadIdentityPoolProviderX509Args) WorkloadIdentityPoolProviderX509PtrInput { + return (*workloadIdentityPoolProviderX509PtrType)(v) +} + +func (*workloadIdentityPoolProviderX509PtrType) ElementType() reflect.Type { + return reflect.TypeOf((**WorkloadIdentityPoolProviderX509)(nil)).Elem() +} + +func (i *workloadIdentityPoolProviderX509PtrType) ToWorkloadIdentityPoolProviderX509PtrOutput() WorkloadIdentityPoolProviderX509PtrOutput { + return i.ToWorkloadIdentityPoolProviderX509PtrOutputWithContext(context.Background()) +} + +func (i *workloadIdentityPoolProviderX509PtrType) ToWorkloadIdentityPoolProviderX509PtrOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509PtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509PtrOutput) +} + +type WorkloadIdentityPoolProviderX509Output struct{ *pulumi.OutputState } + +func (WorkloadIdentityPoolProviderX509Output) ElementType() reflect.Type { + return reflect.TypeOf((*WorkloadIdentityPoolProviderX509)(nil)).Elem() +} + +func (o WorkloadIdentityPoolProviderX509Output) ToWorkloadIdentityPoolProviderX509Output() WorkloadIdentityPoolProviderX509Output { + return o +} + +func (o WorkloadIdentityPoolProviderX509Output) ToWorkloadIdentityPoolProviderX509OutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509Output { + return o +} + +func (o WorkloadIdentityPoolProviderX509Output) ToWorkloadIdentityPoolProviderX509PtrOutput() WorkloadIdentityPoolProviderX509PtrOutput { + return o.ToWorkloadIdentityPoolProviderX509PtrOutputWithContext(context.Background()) +} + +func (o WorkloadIdentityPoolProviderX509Output) ToWorkloadIdentityPoolProviderX509PtrOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509PtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v WorkloadIdentityPoolProviderX509) *WorkloadIdentityPoolProviderX509 { + return &v + }).(WorkloadIdentityPoolProviderX509PtrOutput) +} + +// A Trust store, use this trust store as a wrapper to config the trust +// anchor and optional intermediate cas to help build the trust chain for +// the incoming end entity certificate. Follow the x509 guidelines to +// define those PEM encoded certs. Only 1 trust store is currently +// supported. 
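+//
+// As a minimal sketch of how such a trust store is assembled from the types defined
+// later in this file (only field names that appear in this file are used; the PEM
+// content is a placeholder):
+//
+//	x509 := WorkloadIdentityPoolProviderX509Args{
+//		TrustStore: WorkloadIdentityPoolProviderX509TrustStoreArgs{
+//			TrustAnchors: WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray{
+//				WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{
+//					PemCertificate: pulumi.String("-----BEGIN CERTIFICATE----- ... -----END CERTIFICATE-----"),
+//				},
+//			},
+//		},
+//	}
+//
+// which would then be passed as the provider's x509 input (the field name on the
+// resource args is assumed, not shown in this hunk).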
+func (o WorkloadIdentityPoolProviderX509Output) TrustStore() WorkloadIdentityPoolProviderX509TrustStoreOutput { + return o.ApplyT(func(v WorkloadIdentityPoolProviderX509) WorkloadIdentityPoolProviderX509TrustStore { + return v.TrustStore + }).(WorkloadIdentityPoolProviderX509TrustStoreOutput) +} + +type WorkloadIdentityPoolProviderX509PtrOutput struct{ *pulumi.OutputState } + +func (WorkloadIdentityPoolProviderX509PtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**WorkloadIdentityPoolProviderX509)(nil)).Elem() +} + +func (o WorkloadIdentityPoolProviderX509PtrOutput) ToWorkloadIdentityPoolProviderX509PtrOutput() WorkloadIdentityPoolProviderX509PtrOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509PtrOutput) ToWorkloadIdentityPoolProviderX509PtrOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509PtrOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509PtrOutput) Elem() WorkloadIdentityPoolProviderX509Output { + return o.ApplyT(func(v *WorkloadIdentityPoolProviderX509) WorkloadIdentityPoolProviderX509 { + if v != nil { + return *v + } + var ret WorkloadIdentityPoolProviderX509 + return ret + }).(WorkloadIdentityPoolProviderX509Output) +} + +// A Trust store, use this trust store as a wrapper to config the trust +// anchor and optional intermediate cas to help build the trust chain for +// the incoming end entity certificate. Follow the x509 guidelines to +// define those PEM encoded certs. Only 1 trust store is currently +// supported. +func (o WorkloadIdentityPoolProviderX509PtrOutput) TrustStore() WorkloadIdentityPoolProviderX509TrustStorePtrOutput { + return o.ApplyT(func(v *WorkloadIdentityPoolProviderX509) *WorkloadIdentityPoolProviderX509TrustStore { + if v == nil { + return nil + } + return &v.TrustStore + }).(WorkloadIdentityPoolProviderX509TrustStorePtrOutput) +} + +type WorkloadIdentityPoolProviderX509TrustStore struct { + // Set of intermediate CA certificates used for building the trust chain to + // trust anchor. + // IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + // Structure is documented below. + IntermediateCas []WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa `pulumi:"intermediateCas"` + // List of Trust Anchors to be used while performing validation + // against a given TrustStore. The incoming end entity's certificate + // must be chained up to one of the trust anchors here. + // Structure is documented below. + TrustAnchors []WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor `pulumi:"trustAnchors"` +} + +// WorkloadIdentityPoolProviderX509TrustStoreInput is an input type that accepts WorkloadIdentityPoolProviderX509TrustStoreArgs and WorkloadIdentityPoolProviderX509TrustStoreOutput values. +// You can construct a concrete instance of `WorkloadIdentityPoolProviderX509TrustStoreInput` via: +// +// WorkloadIdentityPoolProviderX509TrustStoreArgs{...} +type WorkloadIdentityPoolProviderX509TrustStoreInput interface { + pulumi.Input + + ToWorkloadIdentityPoolProviderX509TrustStoreOutput() WorkloadIdentityPoolProviderX509TrustStoreOutput + ToWorkloadIdentityPoolProviderX509TrustStoreOutputWithContext(context.Context) WorkloadIdentityPoolProviderX509TrustStoreOutput +} + +type WorkloadIdentityPoolProviderX509TrustStoreArgs struct { + // Set of intermediate CA certificates used for building the trust chain to + // trust anchor. + // IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + // Structure is documented below. 
+ IntermediateCas WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput `pulumi:"intermediateCas"` + // List of Trust Anchors to be used while performing validation + // against a given TrustStore. The incoming end entity's certificate + // must be chained up to one of the trust anchors here. + // Structure is documented below. + TrustAnchors WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput `pulumi:"trustAnchors"` +} + +func (WorkloadIdentityPoolProviderX509TrustStoreArgs) ElementType() reflect.Type { + return reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStore)(nil)).Elem() +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreArgs) ToWorkloadIdentityPoolProviderX509TrustStoreOutput() WorkloadIdentityPoolProviderX509TrustStoreOutput { + return i.ToWorkloadIdentityPoolProviderX509TrustStoreOutputWithContext(context.Background()) +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreArgs) ToWorkloadIdentityPoolProviderX509TrustStoreOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreOutput { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509TrustStoreOutput) +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreArgs) ToWorkloadIdentityPoolProviderX509TrustStorePtrOutput() WorkloadIdentityPoolProviderX509TrustStorePtrOutput { + return i.ToWorkloadIdentityPoolProviderX509TrustStorePtrOutputWithContext(context.Background()) +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreArgs) ToWorkloadIdentityPoolProviderX509TrustStorePtrOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStorePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509TrustStoreOutput).ToWorkloadIdentityPoolProviderX509TrustStorePtrOutputWithContext(ctx) +} + +// WorkloadIdentityPoolProviderX509TrustStorePtrInput is an input type that accepts WorkloadIdentityPoolProviderX509TrustStoreArgs, WorkloadIdentityPoolProviderX509TrustStorePtr and WorkloadIdentityPoolProviderX509TrustStorePtrOutput values. 
+// You can construct a concrete instance of `WorkloadIdentityPoolProviderX509TrustStorePtrInput` via: +// +// WorkloadIdentityPoolProviderX509TrustStoreArgs{...} +// +// or: +// +// nil +type WorkloadIdentityPoolProviderX509TrustStorePtrInput interface { + pulumi.Input + + ToWorkloadIdentityPoolProviderX509TrustStorePtrOutput() WorkloadIdentityPoolProviderX509TrustStorePtrOutput + ToWorkloadIdentityPoolProviderX509TrustStorePtrOutputWithContext(context.Context) WorkloadIdentityPoolProviderX509TrustStorePtrOutput +} + +type workloadIdentityPoolProviderX509TrustStorePtrType WorkloadIdentityPoolProviderX509TrustStoreArgs + +func WorkloadIdentityPoolProviderX509TrustStorePtr(v *WorkloadIdentityPoolProviderX509TrustStoreArgs) WorkloadIdentityPoolProviderX509TrustStorePtrInput { + return (*workloadIdentityPoolProviderX509TrustStorePtrType)(v) +} + +func (*workloadIdentityPoolProviderX509TrustStorePtrType) ElementType() reflect.Type { + return reflect.TypeOf((**WorkloadIdentityPoolProviderX509TrustStore)(nil)).Elem() +} + +func (i *workloadIdentityPoolProviderX509TrustStorePtrType) ToWorkloadIdentityPoolProviderX509TrustStorePtrOutput() WorkloadIdentityPoolProviderX509TrustStorePtrOutput { + return i.ToWorkloadIdentityPoolProviderX509TrustStorePtrOutputWithContext(context.Background()) +} + +func (i *workloadIdentityPoolProviderX509TrustStorePtrType) ToWorkloadIdentityPoolProviderX509TrustStorePtrOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStorePtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509TrustStorePtrOutput) +} + +type WorkloadIdentityPoolProviderX509TrustStoreOutput struct{ *pulumi.OutputState } + +func (WorkloadIdentityPoolProviderX509TrustStoreOutput) ElementType() reflect.Type { + return reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStore)(nil)).Elem() +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreOutput) ToWorkloadIdentityPoolProviderX509TrustStoreOutput() WorkloadIdentityPoolProviderX509TrustStoreOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreOutput) ToWorkloadIdentityPoolProviderX509TrustStoreOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreOutput) ToWorkloadIdentityPoolProviderX509TrustStorePtrOutput() WorkloadIdentityPoolProviderX509TrustStorePtrOutput { + return o.ToWorkloadIdentityPoolProviderX509TrustStorePtrOutputWithContext(context.Background()) +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreOutput) ToWorkloadIdentityPoolProviderX509TrustStorePtrOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStorePtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v WorkloadIdentityPoolProviderX509TrustStore) *WorkloadIdentityPoolProviderX509TrustStore { + return &v + }).(WorkloadIdentityPoolProviderX509TrustStorePtrOutput) +} + +// Set of intermediate CA certificates used for building the trust chain to +// trust anchor. +// IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. +// Structure is documented below. 
+func (o WorkloadIdentityPoolProviderX509TrustStoreOutput) IntermediateCas() WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return o.ApplyT(func(v WorkloadIdentityPoolProviderX509TrustStore) []WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + return v.IntermediateCas + }).(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) +} + +// List of Trust Anchors to be used while performing validation +// against a given TrustStore. The incoming end entity's certificate +// must be chained up to one of the trust anchors here. +// Structure is documented below. +func (o WorkloadIdentityPoolProviderX509TrustStoreOutput) TrustAnchors() WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return o.ApplyT(func(v WorkloadIdentityPoolProviderX509TrustStore) []WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + return v.TrustAnchors + }).(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) +} + +type WorkloadIdentityPoolProviderX509TrustStorePtrOutput struct{ *pulumi.OutputState } + +func (WorkloadIdentityPoolProviderX509TrustStorePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**WorkloadIdentityPoolProviderX509TrustStore)(nil)).Elem() +} + +func (o WorkloadIdentityPoolProviderX509TrustStorePtrOutput) ToWorkloadIdentityPoolProviderX509TrustStorePtrOutput() WorkloadIdentityPoolProviderX509TrustStorePtrOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStorePtrOutput) ToWorkloadIdentityPoolProviderX509TrustStorePtrOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStorePtrOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStorePtrOutput) Elem() WorkloadIdentityPoolProviderX509TrustStoreOutput { + return o.ApplyT(func(v *WorkloadIdentityPoolProviderX509TrustStore) WorkloadIdentityPoolProviderX509TrustStore { + if v != nil { + return *v + } + var ret WorkloadIdentityPoolProviderX509TrustStore + return ret + }).(WorkloadIdentityPoolProviderX509TrustStoreOutput) +} + +// Set of intermediate CA certificates used for building the trust chain to +// trust anchor. +// IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. +// Structure is documented below. +func (o WorkloadIdentityPoolProviderX509TrustStorePtrOutput) IntermediateCas() WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return o.ApplyT(func(v *WorkloadIdentityPoolProviderX509TrustStore) []WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + if v == nil { + return nil + } + return v.IntermediateCas + }).(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) +} + +// List of Trust Anchors to be used while performing validation +// against a given TrustStore. The incoming end entity's certificate +// must be chained up to one of the trust anchors here. +// Structure is documented below. +func (o WorkloadIdentityPoolProviderX509TrustStorePtrOutput) TrustAnchors() WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return o.ApplyT(func(v *WorkloadIdentityPoolProviderX509TrustStore) []WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + if v == nil { + return nil + } + return v.TrustAnchors + }).(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) +} + +type WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa struct { + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). 
+ PemCertificate *string `pulumi:"pemCertificate"` +} + +// WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput is an input type that accepts WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs and WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput values. +// You can construct a concrete instance of `WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput` via: +// +// WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs{...} +type WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput interface { + pulumi.Input + + ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput() WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput + ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutputWithContext(context.Context) WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput +} + +type WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs struct { + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). + PemCertificate pulumi.StringPtrInput `pulumi:"pemCertificate"` +} + +func (WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs) ElementType() reflect.Type { + return reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)(nil)).Elem() +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs) ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput() WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return i.ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutputWithContext(context.Background()) +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs) ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) +} + +// WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput is an input type that accepts WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray and WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput values. 
+// You can construct a concrete instance of `WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput` via: +// +// WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray{ WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs{...} } +type WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput interface { + pulumi.Input + + ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput() WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput + ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutputWithContext(context.Context) WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput +} + +type WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray []WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput + +func (WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)(nil)).Elem() +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray) ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput() WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return i.ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutputWithContext(context.Background()) +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray) ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) +} + +type WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput struct{ *pulumi.OutputState } + +func (WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) ElementType() reflect.Type { + return reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)(nil)).Elem() +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput() WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return o +} + +// PEM certificate of the PKI used for validation. Must only contain one +// ca certificate(either root or intermediate cert). 
+func (o WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) PemCertificate() pulumi.StringPtrOutput { + return o.ApplyT(func(v WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa) *string { return v.PemCertificate }).(pulumi.StringPtrOutput) +} + +type WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput struct{ *pulumi.OutputState } + +func (WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)(nil)).Elem() +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput() WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) ToWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) Index(i pulumi.IntInput) WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + return vs[0].([]WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)[vs[1].(int)] + }).(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) +} + +type WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor struct { + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). + PemCertificate *string `pulumi:"pemCertificate"` +} + +// WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput is an input type that accepts WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs and WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput values. +// You can construct a concrete instance of `WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput` via: +// +// WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{...} +type WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput interface { + pulumi.Input + + ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput() WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput + ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutputWithContext(context.Context) WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput +} + +type WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs struct { + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). 
+ PemCertificate pulumi.StringPtrInput `pulumi:"pemCertificate"` +} + +func (WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs) ElementType() reflect.Type { + return reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)(nil)).Elem() +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs) ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput() WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return i.ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutputWithContext(context.Background()) +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs) ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) +} + +// WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput is an input type that accepts WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray and WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput values. +// You can construct a concrete instance of `WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput` via: +// +// WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray{ WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{...} } +type WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput interface { + pulumi.Input + + ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput() WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput + ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutputWithContext(context.Context) WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput +} + +type WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray []WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput + +func (WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)(nil)).Elem() +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray) ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput() WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return i.ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutputWithContext(context.Background()) +} + +func (i WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray) ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) +} + +type WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput struct{ *pulumi.OutputState } + +func (WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) ElementType() reflect.Type { + return reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)(nil)).Elem() +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput() WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return o +} + +// PEM certificate 
of the PKI used for validation. Must only contain one +// ca certificate(either root or intermediate cert). +func (o WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) PemCertificate() pulumi.StringPtrOutput { + return o.ApplyT(func(v WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor) *string { return v.PemCertificate }).(pulumi.StringPtrOutput) +} + +type WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput struct{ *pulumi.OutputState } + +func (WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)(nil)).Elem() +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput() WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) ToWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutputWithContext(ctx context.Context) WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return o +} + +func (o WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) Index(i pulumi.IntInput) WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + return vs[0].([]WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)[vs[1].(int)] + }).(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) +} + type GetTestablePermissionsPermission struct { // Whether the corresponding API has been enabled for the resource. ApiDisabled bool `pulumi:"apiDisabled"` @@ -4006,6 +4553,439 @@ func (o GetWorkloadIdentityPoolProviderSamlArrayOutput) Index(i pulumi.IntInput) }).(GetWorkloadIdentityPoolProviderSamlOutput) } +type GetWorkloadIdentityPoolProviderX509 struct { + // A Trust store, use this trust store as a wrapper to config the trust + // anchor and optional intermediate cas to help build the trust chain for + // the incoming end entity certificate. Follow the x509 guidelines to + // define those PEM encoded certs. Only 1 trust store is currently + // supported. + TrustStores []GetWorkloadIdentityPoolProviderX509TrustStore `pulumi:"trustStores"` +} + +// GetWorkloadIdentityPoolProviderX509Input is an input type that accepts GetWorkloadIdentityPoolProviderX509Args and GetWorkloadIdentityPoolProviderX509Output values. +// You can construct a concrete instance of `GetWorkloadIdentityPoolProviderX509Input` via: +// +// GetWorkloadIdentityPoolProviderX509Args{...} +type GetWorkloadIdentityPoolProviderX509Input interface { + pulumi.Input + + ToGetWorkloadIdentityPoolProviderX509Output() GetWorkloadIdentityPoolProviderX509Output + ToGetWorkloadIdentityPoolProviderX509OutputWithContext(context.Context) GetWorkloadIdentityPoolProviderX509Output +} + +type GetWorkloadIdentityPoolProviderX509Args struct { + // A Trust store, use this trust store as a wrapper to config the trust + // anchor and optional intermediate cas to help build the trust chain for + // the incoming end entity certificate. Follow the x509 guidelines to + // define those PEM encoded certs. Only 1 trust store is currently + // supported. 
+ TrustStores GetWorkloadIdentityPoolProviderX509TrustStoreArrayInput `pulumi:"trustStores"` +} + +func (GetWorkloadIdentityPoolProviderX509Args) ElementType() reflect.Type { + return reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509)(nil)).Elem() +} + +func (i GetWorkloadIdentityPoolProviderX509Args) ToGetWorkloadIdentityPoolProviderX509Output() GetWorkloadIdentityPoolProviderX509Output { + return i.ToGetWorkloadIdentityPoolProviderX509OutputWithContext(context.Background()) +} + +func (i GetWorkloadIdentityPoolProviderX509Args) ToGetWorkloadIdentityPoolProviderX509OutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509Output { + return pulumi.ToOutputWithContext(ctx, i).(GetWorkloadIdentityPoolProviderX509Output) +} + +// GetWorkloadIdentityPoolProviderX509ArrayInput is an input type that accepts GetWorkloadIdentityPoolProviderX509Array and GetWorkloadIdentityPoolProviderX509ArrayOutput values. +// You can construct a concrete instance of `GetWorkloadIdentityPoolProviderX509ArrayInput` via: +// +// GetWorkloadIdentityPoolProviderX509Array{ GetWorkloadIdentityPoolProviderX509Args{...} } +type GetWorkloadIdentityPoolProviderX509ArrayInput interface { + pulumi.Input + + ToGetWorkloadIdentityPoolProviderX509ArrayOutput() GetWorkloadIdentityPoolProviderX509ArrayOutput + ToGetWorkloadIdentityPoolProviderX509ArrayOutputWithContext(context.Context) GetWorkloadIdentityPoolProviderX509ArrayOutput +} + +type GetWorkloadIdentityPoolProviderX509Array []GetWorkloadIdentityPoolProviderX509Input + +func (GetWorkloadIdentityPoolProviderX509Array) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetWorkloadIdentityPoolProviderX509)(nil)).Elem() +} + +func (i GetWorkloadIdentityPoolProviderX509Array) ToGetWorkloadIdentityPoolProviderX509ArrayOutput() GetWorkloadIdentityPoolProviderX509ArrayOutput { + return i.ToGetWorkloadIdentityPoolProviderX509ArrayOutputWithContext(context.Background()) +} + +func (i GetWorkloadIdentityPoolProviderX509Array) ToGetWorkloadIdentityPoolProviderX509ArrayOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509ArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetWorkloadIdentityPoolProviderX509ArrayOutput) +} + +type GetWorkloadIdentityPoolProviderX509Output struct{ *pulumi.OutputState } + +func (GetWorkloadIdentityPoolProviderX509Output) ElementType() reflect.Type { + return reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509)(nil)).Elem() +} + +func (o GetWorkloadIdentityPoolProviderX509Output) ToGetWorkloadIdentityPoolProviderX509Output() GetWorkloadIdentityPoolProviderX509Output { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509Output) ToGetWorkloadIdentityPoolProviderX509OutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509Output { + return o +} + +// A Trust store, use this trust store as a wrapper to config the trust +// anchor and optional intermediate cas to help build the trust chain for +// the incoming end entity certificate. Follow the x509 guidelines to +// define those PEM encoded certs. Only 1 trust store is currently +// supported. 
+func (o GetWorkloadIdentityPoolProviderX509Output) TrustStores() GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput { + return o.ApplyT(func(v GetWorkloadIdentityPoolProviderX509) []GetWorkloadIdentityPoolProviderX509TrustStore { + return v.TrustStores + }).(GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput) +} + +type GetWorkloadIdentityPoolProviderX509ArrayOutput struct{ *pulumi.OutputState } + +func (GetWorkloadIdentityPoolProviderX509ArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetWorkloadIdentityPoolProviderX509)(nil)).Elem() +} + +func (o GetWorkloadIdentityPoolProviderX509ArrayOutput) ToGetWorkloadIdentityPoolProviderX509ArrayOutput() GetWorkloadIdentityPoolProviderX509ArrayOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509ArrayOutput) ToGetWorkloadIdentityPoolProviderX509ArrayOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509ArrayOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509ArrayOutput) Index(i pulumi.IntInput) GetWorkloadIdentityPoolProviderX509Output { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetWorkloadIdentityPoolProviderX509 { + return vs[0].([]GetWorkloadIdentityPoolProviderX509)[vs[1].(int)] + }).(GetWorkloadIdentityPoolProviderX509Output) +} + +type GetWorkloadIdentityPoolProviderX509TrustStore struct { + // Set of intermediate CA certificates used for building the trust chain to + // trust anchor. + // IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + IntermediateCas []GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa `pulumi:"intermediateCas"` + // List of Trust Anchors to be used while performing validation + // against a given TrustStore. The incoming end entity's certificate + // must be chained up to one of the trust anchors here. + TrustAnchors []GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor `pulumi:"trustAnchors"` +} + +// GetWorkloadIdentityPoolProviderX509TrustStoreInput is an input type that accepts GetWorkloadIdentityPoolProviderX509TrustStoreArgs and GetWorkloadIdentityPoolProviderX509TrustStoreOutput values. +// You can construct a concrete instance of `GetWorkloadIdentityPoolProviderX509TrustStoreInput` via: +// +// GetWorkloadIdentityPoolProviderX509TrustStoreArgs{...} +type GetWorkloadIdentityPoolProviderX509TrustStoreInput interface { + pulumi.Input + + ToGetWorkloadIdentityPoolProviderX509TrustStoreOutput() GetWorkloadIdentityPoolProviderX509TrustStoreOutput + ToGetWorkloadIdentityPoolProviderX509TrustStoreOutputWithContext(context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreOutput +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreArgs struct { + // Set of intermediate CA certificates used for building the trust chain to + // trust anchor. + // IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + IntermediateCas GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput `pulumi:"intermediateCas"` + // List of Trust Anchors to be used while performing validation + // against a given TrustStore. The incoming end entity's certificate + // must be chained up to one of the trust anchors here. 
+ TrustAnchors GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput `pulumi:"trustAnchors"` +} + +func (GetWorkloadIdentityPoolProviderX509TrustStoreArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStore)(nil)).Elem() +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreArgs) ToGetWorkloadIdentityPoolProviderX509TrustStoreOutput() GetWorkloadIdentityPoolProviderX509TrustStoreOutput { + return i.ToGetWorkloadIdentityPoolProviderX509TrustStoreOutputWithContext(context.Background()) +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreArgs) ToGetWorkloadIdentityPoolProviderX509TrustStoreOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetWorkloadIdentityPoolProviderX509TrustStoreOutput) +} + +// GetWorkloadIdentityPoolProviderX509TrustStoreArrayInput is an input type that accepts GetWorkloadIdentityPoolProviderX509TrustStoreArray and GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput values. +// You can construct a concrete instance of `GetWorkloadIdentityPoolProviderX509TrustStoreArrayInput` via: +// +// GetWorkloadIdentityPoolProviderX509TrustStoreArray{ GetWorkloadIdentityPoolProviderX509TrustStoreArgs{...} } +type GetWorkloadIdentityPoolProviderX509TrustStoreArrayInput interface { + pulumi.Input + + ToGetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput() GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput + ToGetWorkloadIdentityPoolProviderX509TrustStoreArrayOutputWithContext(context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreArray []GetWorkloadIdentityPoolProviderX509TrustStoreInput + +func (GetWorkloadIdentityPoolProviderX509TrustStoreArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetWorkloadIdentityPoolProviderX509TrustStore)(nil)).Elem() +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreArray) ToGetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput() GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput { + return i.ToGetWorkloadIdentityPoolProviderX509TrustStoreArrayOutputWithContext(context.Background()) +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreArray) ToGetWorkloadIdentityPoolProviderX509TrustStoreArrayOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput) +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreOutput struct{ *pulumi.OutputState } + +func (GetWorkloadIdentityPoolProviderX509TrustStoreOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStore)(nil)).Elem() +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreOutput() GetWorkloadIdentityPoolProviderX509TrustStoreOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreOutput { + return o +} + +// Set of intermediate CA certificates used for building the trust chain to +// trust anchor. +// IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. 
+func (o GetWorkloadIdentityPoolProviderX509TrustStoreOutput) IntermediateCas() GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return o.ApplyT(func(v GetWorkloadIdentityPoolProviderX509TrustStore) []GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + return v.IntermediateCas + }).(GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) +} + +// List of Trust Anchors to be used while performing validation +// against a given TrustStore. The incoming end entity's certificate +// must be chained up to one of the trust anchors here. +func (o GetWorkloadIdentityPoolProviderX509TrustStoreOutput) TrustAnchors() GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return o.ApplyT(func(v GetWorkloadIdentityPoolProviderX509TrustStore) []GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + return v.TrustAnchors + }).(GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput struct{ *pulumi.OutputState } + +func (GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetWorkloadIdentityPoolProviderX509TrustStore)(nil)).Elem() +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput() GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreArrayOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput) Index(i pulumi.IntInput) GetWorkloadIdentityPoolProviderX509TrustStoreOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetWorkloadIdentityPoolProviderX509TrustStore { + return vs[0].([]GetWorkloadIdentityPoolProviderX509TrustStore)[vs[1].(int)] + }).(GetWorkloadIdentityPoolProviderX509TrustStoreOutput) +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa struct { + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). + PemCertificate string `pulumi:"pemCertificate"` +} + +// GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput is an input type that accepts GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs and GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput values. +// You can construct a concrete instance of `GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput` via: +// +// GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs{...} +type GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput interface { + pulumi.Input + + ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput() GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput + ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutputWithContext(context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs struct { + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). 
+ PemCertificate pulumi.StringInput `pulumi:"pemCertificate"` +} + +func (GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)(nil)).Elem() +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs) ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput() GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return i.ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutputWithContext(context.Background()) +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs) ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) +} + +// GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput is an input type that accepts GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray and GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput values. +// You can construct a concrete instance of `GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput` via: +// +// GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray{ GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs{...} } +type GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput interface { + pulumi.Input + + ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput() GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput + ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutputWithContext(context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray []GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput + +func (GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)(nil)).Elem() +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray) ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput() GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return i.ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutputWithContext(context.Background()) +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray) ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput struct{ *pulumi.OutputState } + +func (GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)(nil)).Elem() +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput() GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return o +} + +func (o 
GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return o +} + +// PEM certificate of the PKI used for validation. Must only contain one +// ca certificate(either root or intermediate cert). +func (o GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) PemCertificate() pulumi.StringOutput { + return o.ApplyT(func(v GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa) string { return v.PemCertificate }).(pulumi.StringOutput) +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput struct{ *pulumi.OutputState } + +func (GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)(nil)).Elem() +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput() GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput) Index(i pulumi.IntInput) GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + return vs[0].([]GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa)[vs[1].(int)] + }).(GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput) +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor struct { + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). + PemCertificate string `pulumi:"pemCertificate"` +} + +// GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput is an input type that accepts GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs and GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput values. +// You can construct a concrete instance of `GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput` via: +// +// GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{...} +type GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput interface { + pulumi.Input + + ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput() GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput + ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutputWithContext(context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs struct { + // PEM certificate of the PKI used for validation. Must only contain one + // ca certificate(either root or intermediate cert). 
+ PemCertificate pulumi.StringInput `pulumi:"pemCertificate"` +} + +func (GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)(nil)).Elem() +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs) ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput() GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return i.ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutputWithContext(context.Background()) +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs) ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) +} + +// GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput is an input type that accepts GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray and GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput values. +// You can construct a concrete instance of `GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput` via: +// +// GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray{ GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{...} } +type GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput interface { + pulumi.Input + + ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput() GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput + ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutputWithContext(context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray []GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput + +func (GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)(nil)).Elem() +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray) ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput() GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return i.ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutputWithContext(context.Background()) +} + +func (i GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray) ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput struct{ *pulumi.OutputState } + +func (GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)(nil)).Elem() +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput() GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) 
ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return o +} + +// PEM certificate of the PKI used for validation. Must only contain one +// ca certificate(either root or intermediate cert). +func (o GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) PemCertificate() pulumi.StringOutput { + return o.ApplyT(func(v GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor) string { return v.PemCertificate }).(pulumi.StringOutput) +} + +type GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput struct{ *pulumi.OutputState } + +func (GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)(nil)).Elem() +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput() GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) ToGetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutputWithContext(ctx context.Context) GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput { + return o +} + +func (o GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput) Index(i pulumi.IntInput) GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + return vs[0].([]GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor)[vs[1].(int)] + }).(GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput) +} + func init() { pulumi.RegisterInputType(reflect.TypeOf((*AccessBoundaryPolicyRuleInput)(nil)).Elem(), AccessBoundaryPolicyRuleArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*AccessBoundaryPolicyRuleArrayInput)(nil)).Elem(), AccessBoundaryPolicyRuleArray{}) @@ -4047,6 +5027,14 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderOidcPtrInput)(nil)).Elem(), WorkloadIdentityPoolProviderOidcArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderSamlInput)(nil)).Elem(), WorkloadIdentityPoolProviderSamlArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderSamlPtrInput)(nil)).Elem(), WorkloadIdentityPoolProviderSamlArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderX509Input)(nil)).Elem(), WorkloadIdentityPoolProviderX509Args{}) + pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderX509PtrInput)(nil)).Elem(), WorkloadIdentityPoolProviderX509Args{}) + pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStoreInput)(nil)).Elem(), WorkloadIdentityPoolProviderX509TrustStoreArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStorePtrInput)(nil)).Elem(), WorkloadIdentityPoolProviderX509TrustStoreArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput)(nil)).Elem(), WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput)(nil)).Elem(), WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput)(nil)).Elem(), WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput)(nil)).Elem(), WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetTestablePermissionsPermissionInput)(nil)).Elem(), GetTestablePermissionsPermissionArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetTestablePermissionsPermissionArrayInput)(nil)).Elem(), GetTestablePermissionsPermissionArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderAwInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderAwArgs{}) @@ -4055,6 +5043,14 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderOidcArrayInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderOidcArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderSamlInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderSamlArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderSamlArrayInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderSamlArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509Input)(nil)).Elem(), GetWorkloadIdentityPoolProviderX509Args{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509ArrayInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderX509Array{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderX509TrustStoreArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreArrayInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderX509TrustStoreArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayInput)(nil)).Elem(), GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray{}) pulumi.RegisterOutputType(AccessBoundaryPolicyRuleOutput{}) pulumi.RegisterOutputType(AccessBoundaryPolicyRuleArrayOutput{}) pulumi.RegisterOutputType(AccessBoundaryPolicyRuleAccessBoundaryRuleOutput{}) @@ -4095,6 +5091,14 @@ func init() { pulumi.RegisterOutputType(WorkloadIdentityPoolProviderOidcPtrOutput{}) pulumi.RegisterOutputType(WorkloadIdentityPoolProviderSamlOutput{}) pulumi.RegisterOutputType(WorkloadIdentityPoolProviderSamlPtrOutput{}) + pulumi.RegisterOutputType(WorkloadIdentityPoolProviderX509Output{}) + pulumi.RegisterOutputType(WorkloadIdentityPoolProviderX509PtrOutput{}) + pulumi.RegisterOutputType(WorkloadIdentityPoolProviderX509TrustStoreOutput{}) + pulumi.RegisterOutputType(WorkloadIdentityPoolProviderX509TrustStorePtrOutput{}) + pulumi.RegisterOutputType(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput{}) + 
pulumi.RegisterOutputType(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput{}) + pulumi.RegisterOutputType(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput{}) + pulumi.RegisterOutputType(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput{}) pulumi.RegisterOutputType(GetTestablePermissionsPermissionOutput{}) pulumi.RegisterOutputType(GetTestablePermissionsPermissionArrayOutput{}) pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderAwOutput{}) @@ -4103,4 +5107,12 @@ func init() { pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderOidcArrayOutput{}) pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderSamlOutput{}) pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderSamlArrayOutput{}) + pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderX509Output{}) + pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderX509ArrayOutput{}) + pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderX509TrustStoreOutput{}) + pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderX509TrustStoreArrayOutput{}) + pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaOutput{}) + pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArrayOutput{}) + pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorOutput{}) + pulumi.RegisterOutputType(GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArrayOutput{}) } diff --git a/sdk/go/gcp/iam/workloadIdentityPoolProvider.go b/sdk/go/gcp/iam/workloadIdentityPoolProvider.go index a5882b5afa..da54c4ee74 100644 --- a/sdk/go/gcp/iam/workloadIdentityPoolProvider.go +++ b/sdk/go/gcp/iam/workloadIdentityPoolProvider.go @@ -333,6 +333,122 @@ import ( // } // // ``` +// ### Iam Workload Identity Pool Provider X509 Basic +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam" +// "github.com/pulumi/pulumi-std/sdk/go/std" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// pool, err := iam.NewWorkloadIdentityPool(ctx, "pool", &iam.WorkloadIdentityPoolArgs{ +// WorkloadIdentityPoolId: pulumi.String("example-pool"), +// }) +// if err != nil { +// return err +// } +// invokeFile, err := std.File(ctx, &std.FileArgs{ +// Input: "test-fixtures/trust_anchor.pem", +// }, nil) +// if err != nil { +// return err +// } +// _, err = iam.NewWorkloadIdentityPoolProvider(ctx, "example", &iam.WorkloadIdentityPoolProviderArgs{ +// WorkloadIdentityPoolId: pool.WorkloadIdentityPoolId, +// WorkloadIdentityPoolProviderId: pulumi.String("example-prvdr"), +// AttributeMapping: pulumi.StringMap{ +// "google.subject": pulumi.String("assertion.subject.dn.cn"), +// }, +// X509: &iam.WorkloadIdentityPoolProviderX509Args{ +// TrustStore: &iam.WorkloadIdentityPoolProviderX509TrustStoreArgs{ +// TrustAnchors: iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray{ +// &iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{ +// PemCertificate: pulumi.String(invokeFile.Result), +// }, +// }, +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// ### Iam Workload Identity Pool Provider X509 Full +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/iam" +// "github.com/pulumi/pulumi-std/sdk/go/std" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx 
*pulumi.Context) error { +// pool, err := iam.NewWorkloadIdentityPool(ctx, "pool", &iam.WorkloadIdentityPoolArgs{ +// WorkloadIdentityPoolId: pulumi.String("example-pool"), +// }) +// if err != nil { +// return err +// } +// invokeFile, err := std.File(ctx, &std.FileArgs{ +// Input: "test-fixtures/trust_anchor.pem", +// }, nil) +// if err != nil { +// return err +// } +// invokeFile1, err := std.File(ctx, &std.FileArgs{ +// Input: "test-fixtures/intermediate_ca.pem", +// }, nil) +// if err != nil { +// return err +// } +// _, err = iam.NewWorkloadIdentityPoolProvider(ctx, "example", &iam.WorkloadIdentityPoolProviderArgs{ +// WorkloadIdentityPoolId: pool.WorkloadIdentityPoolId, +// WorkloadIdentityPoolProviderId: pulumi.String("example-prvdr"), +// DisplayName: pulumi.String("Name of provider"), +// Description: pulumi.String("X.509 identity pool provider for automated test"), +// Disabled: pulumi.Bool(true), +// AttributeMapping: pulumi.StringMap{ +// "google.subject": pulumi.String("assertion.subject.dn.cn"), +// }, +// X509: &iam.WorkloadIdentityPoolProviderX509Args{ +// TrustStore: &iam.WorkloadIdentityPoolProviderX509TrustStoreArgs{ +// TrustAnchors: iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArray{ +// &iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs{ +// PemCertificate: pulumi.String(invokeFile.Result), +// }, +// }, +// IntermediateCas: iam.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArray{ +// &iam.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs{ +// PemCertificate: pulumi.String(invokeFile1.Result), +// }, +// }, +// }, +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` // // ## Import // @@ -444,6 +560,10 @@ type WorkloadIdentityPoolProvider struct { // // *** WorkloadIdentityPoolProviderId pulumi.StringOutput `pulumi:"workloadIdentityPoolProviderId"` + // An X.509-type identity provider represents a CA. It is trusted to assert a + // client identity if the client has a certificate that chains up to this CA. + // Structure is documented below. + X509 WorkloadIdentityPoolProviderX509PtrOutput `pulumi:"x509"` } // NewWorkloadIdentityPoolProvider registers a new resource with the given unique name, arguments, and options. @@ -566,6 +686,10 @@ type workloadIdentityPoolProviderState struct { // // *** WorkloadIdentityPoolProviderId *string `pulumi:"workloadIdentityPoolProviderId"` + // An X.509-type identity provider represents a CA. It is trusted to assert a + // client identity if the client has a certificate that chains up to this CA. + // Structure is documented below. + X509 *WorkloadIdentityPoolProviderX509 `pulumi:"x509"` } type WorkloadIdentityPoolProviderState struct { @@ -653,6 +777,10 @@ type WorkloadIdentityPoolProviderState struct { // // *** WorkloadIdentityPoolProviderId pulumi.StringPtrInput + // An X.509-type identity provider represents a CA. It is trusted to assert a + // client identity if the client has a certificate that chains up to this CA. + // Structure is documented below. + X509 WorkloadIdentityPoolProviderX509PtrInput } func (WorkloadIdentityPoolProviderState) ElementType() reflect.Type { @@ -733,6 +861,10 @@ type workloadIdentityPoolProviderArgs struct { // // *** WorkloadIdentityPoolProviderId string `pulumi:"workloadIdentityPoolProviderId"` + // An X.509-type identity provider represents a CA. It is trusted to assert a + // client identity if the client has a certificate that chains up to this CA. + // Structure is documented below. 
+ X509 *WorkloadIdentityPoolProviderX509 `pulumi:"x509"` } // The set of arguments for constructing a WorkloadIdentityPoolProvider resource. @@ -810,6 +942,10 @@ type WorkloadIdentityPoolProviderArgs struct { // // *** WorkloadIdentityPoolProviderId pulumi.StringInput + // An X.509-type identity provider represents a CA. It is trusted to assert a + // client identity if the client has a certificate that chains up to this CA. + // Structure is documented below. + X509 WorkloadIdentityPoolProviderX509PtrInput } func (WorkloadIdentityPoolProviderArgs) ElementType() reflect.Type { @@ -1022,6 +1158,13 @@ func (o WorkloadIdentityPoolProviderOutput) WorkloadIdentityPoolProviderId() pul return o.ApplyT(func(v *WorkloadIdentityPoolProvider) pulumi.StringOutput { return v.WorkloadIdentityPoolProviderId }).(pulumi.StringOutput) } +// An X.509-type identity provider represents a CA. It is trusted to assert a +// client identity if the client has a certificate that chains up to this CA. +// Structure is documented below. +func (o WorkloadIdentityPoolProviderOutput) X509() WorkloadIdentityPoolProviderX509PtrOutput { + return o.ApplyT(func(v *WorkloadIdentityPoolProvider) WorkloadIdentityPoolProviderX509PtrOutput { return v.X509 }).(WorkloadIdentityPoolProviderX509PtrOutput) +} + type WorkloadIdentityPoolProviderArrayOutput struct{ *pulumi.OutputState } func (WorkloadIdentityPoolProviderArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/gcp/kms/autokeyConfig.go b/sdk/go/gcp/kms/autokeyConfig.go index 04426c3769..32c95d23fd 100644 --- a/sdk/go/gcp/kms/autokeyConfig.go +++ b/sdk/go/gcp/kms/autokeyConfig.go @@ -118,7 +118,7 @@ import ( // return err // } // _, err = kms.NewAutokeyConfig(ctx, "example-autokeyconfig", &kms.AutokeyConfigArgs{ -// Folder: autokmsFolder.FolderId, +// Folder: autokmsFolder.ID(), // KeyProject: keyProject.ProjectId.ApplyT(func(projectId string) (string, error) { // return fmt.Sprintf("projects/%v", projectId), nil // }).(pulumi.StringOutput), @@ -128,6 +128,16 @@ import ( // if err != nil { // return err // } +// // Wait delay after setting AutokeyConfig, to prevent diffs on reapply, +// // because setting the config takes a little to fully propagate. +// _, err = time.NewSleep(ctx, "wait_autokey_propagation", &time.SleepArgs{ +// CreateDuration: "30s", +// }, pulumi.DependsOn([]pulumi.Resource{ +// example_autokeyconfig, +// })) +// if err != nil { +// return err +// } // return nil // }) // } diff --git a/sdk/go/gcp/kms/getCryptoKeyLatestVersion.go b/sdk/go/gcp/kms/getCryptoKeyLatestVersion.go new file mode 100644 index 0000000000..2707dbf762 --- /dev/null +++ b/sdk/go/gcp/kms/getCryptoKeyLatestVersion.go @@ -0,0 +1,191 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package kms + +import ( + "context" + "reflect" + + "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see +// [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) +// and +// [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). 
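+//
+// As a minimal sketch (the CryptoKey ID and export name here are illustrative
+// placeholders, not part of the generated example below), the Output form of this
+// invoke, `GetCryptoKeyLatestVersionOutput` (defined further down in this file),
+// can be used when the lookup needs to compose with other Pulumi outputs:
+//
+// ```go
+// // Inside a pulumi.Run callback:
+// latest := kms.GetCryptoKeyLatestVersionOutput(ctx, kms.GetCryptoKeyLatestVersionOutputArgs{
+//    // Fully-qualified ID of the CryptoKey whose latest version should be looked up.
+//    CryptoKey: pulumi.String("projects/my-project/locations/us-central1/keyRings/my-key-ring/cryptoKeys/my-crypto-key"),
+//    // Optional: only consider versions in the ENABLED state (see the filter documentation below).
+//    Filter: pulumi.StringPtr("state:ENABLED"),
+// })
+// ctx.Export("latestKmsKeyVersionName", latest.Name())
+// ```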
+//
+// ## Example Usage
+//
+// ```go
+// package main
+//
+// import (
+//
+//    "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
+//    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+//
+// )
+//
+// func main() {
+//    pulumi.Run(func(ctx *pulumi.Context) error {
+//        myKeyRing, err := kms.GetKMSKeyRing(ctx, &kms.GetKMSKeyRingArgs{
+//            Name: "my-key-ring",
+//            Location: "us-central1",
+//        }, nil)
+//        if err != nil {
+//            return err
+//        }
+//        myKey, err := kms.GetKMSCryptoKey(ctx, &kms.GetKMSCryptoKeyArgs{
+//            Name: "my-crypto-key",
+//            KeyRing: myKeyRing.Id,
+//        }, nil)
+//        if err != nil {
+//            return err
+//        }
+//        _, err = kms.GetCryptoKeyLatestVersion(ctx, &kms.GetCryptoKeyLatestVersionArgs{
+//            CryptoKey: myKey.Id,
+//        }, nil)
+//        if err != nil {
+//            return err
+//        }
+//        return nil
+//    })
+// }
+//
+// ```
+func GetCryptoKeyLatestVersion(ctx *pulumi.Context, args *GetCryptoKeyLatestVersionArgs, opts ...pulumi.InvokeOption) (*GetCryptoKeyLatestVersionResult, error) {
+    opts = internal.PkgInvokeDefaultOpts(opts)
+    var rv GetCryptoKeyLatestVersionResult
+    err := ctx.Invoke("gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion", args, &rv, opts...)
+    if err != nil {
+        return nil, err
+    }
+    return &rv, nil
+}
+
+// A collection of arguments for invoking getCryptoKeyLatestVersion.
+type GetCryptoKeyLatestVersionArgs struct {
+    // The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the
+    // `kms.CryptoKey` resource/datasource.
+    CryptoKey string `pulumi:"cryptoKey"`
+    // The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering.
+    //
+    // Example filter values if filtering on state.
+    //
+    // * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED".
+    //
+    // [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering)
+    Filter *string `pulumi:"filter"`
+}
+
+// A collection of values returned by getCryptoKeyLatestVersion.
+type GetCryptoKeyLatestVersionResult struct {
+    // The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.
+    Algorithm string `pulumi:"algorithm"`
+    CryptoKey string `pulumi:"cryptoKey"`
+    Filter *string `pulumi:"filter"`
+    // The provider-assigned unique ID for this managed resource.
+    Id string `pulumi:"id"`
+    Name string `pulumi:"name"`
+    // The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protectionLevel reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs.
+    ProtectionLevel string `pulumi:"protectionLevel"`
+    // If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below.
+    PublicKeys []GetCryptoKeyLatestVersionPublicKey `pulumi:"publicKeys"`
+    // The current state of the latest CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs.
+ State string `pulumi:"state"` + Version int `pulumi:"version"` +} + +func GetCryptoKeyLatestVersionOutput(ctx *pulumi.Context, args GetCryptoKeyLatestVersionOutputArgs, opts ...pulumi.InvokeOption) GetCryptoKeyLatestVersionResultOutput { + return pulumi.ToOutputWithContext(context.Background(), args). + ApplyT(func(v interface{}) (GetCryptoKeyLatestVersionResult, error) { + args := v.(GetCryptoKeyLatestVersionArgs) + r, err := GetCryptoKeyLatestVersion(ctx, &args, opts...) + var s GetCryptoKeyLatestVersionResult + if r != nil { + s = *r + } + return s, err + }).(GetCryptoKeyLatestVersionResultOutput) +} + +// A collection of arguments for invoking getCryptoKeyLatestVersion. +type GetCryptoKeyLatestVersionOutputArgs struct { + // The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + // `kms.CryptoKey` resource/datasource. + CryptoKey pulumi.StringInput `pulumi:"cryptoKey"` + // The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + // + // Example filter values if filtering on state. + // + // * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + // + // [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + Filter pulumi.StringPtrInput `pulumi:"filter"` +} + +func (GetCryptoKeyLatestVersionOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyLatestVersionArgs)(nil)).Elem() +} + +// A collection of values returned by getCryptoKeyLatestVersion. +type GetCryptoKeyLatestVersionResultOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyLatestVersionResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyLatestVersionResult)(nil)).Elem() +} + +func (o GetCryptoKeyLatestVersionResultOutput) ToGetCryptoKeyLatestVersionResultOutput() GetCryptoKeyLatestVersionResultOutput { + return o +} + +func (o GetCryptoKeyLatestVersionResultOutput) ToGetCryptoKeyLatestVersionResultOutputWithContext(ctx context.Context) GetCryptoKeyLatestVersionResultOutput { + return o +} + +// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. +func (o GetCryptoKeyLatestVersionResultOutput) Algorithm() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyLatestVersionResult) string { return v.Algorithm }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyLatestVersionResultOutput) CryptoKey() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyLatestVersionResult) string { return v.CryptoKey }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyLatestVersionResultOutput) Filter() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCryptoKeyLatestVersionResult) *string { return v.Filter }).(pulumi.StringPtrOutput) +} + +// The provider-assigned unique ID for this managed resource. +func (o GetCryptoKeyLatestVersionResultOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyLatestVersionResult) string { return v.Id }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyLatestVersionResultOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyLatestVersionResult) string { return v.Name }).(pulumi.StringOutput) +} + +// The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. 
See the [protectionLevel reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs.
+func (o GetCryptoKeyLatestVersionResultOutput) ProtectionLevel() pulumi.StringOutput {
+	return o.ApplyT(func(v GetCryptoKeyLatestVersionResult) string { return v.ProtectionLevel }).(pulumi.StringOutput)
+}
+
+// If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below.
+func (o GetCryptoKeyLatestVersionResultOutput) PublicKeys() GetCryptoKeyLatestVersionPublicKeyArrayOutput {
+	return o.ApplyT(func(v GetCryptoKeyLatestVersionResult) []GetCryptoKeyLatestVersionPublicKey { return v.PublicKeys }).(GetCryptoKeyLatestVersionPublicKeyArrayOutput)
+}
+
+// The current state of the latest CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs.
+func (o GetCryptoKeyLatestVersionResultOutput) State() pulumi.StringOutput {
+	return o.ApplyT(func(v GetCryptoKeyLatestVersionResult) string { return v.State }).(pulumi.StringOutput)
+}
+
+func (o GetCryptoKeyLatestVersionResultOutput) Version() pulumi.IntOutput {
+	return o.ApplyT(func(v GetCryptoKeyLatestVersionResult) int { return v.Version }).(pulumi.IntOutput)
+}
+
+func init() {
+	pulumi.RegisterOutputType(GetCryptoKeyLatestVersionResultOutput{})
+}
diff --git a/sdk/go/gcp/kms/getCryptoKeyVersions.go b/sdk/go/gcp/kms/getCryptoKeyVersions.go
new file mode 100644
index 0000000000..2e9116c535
--- /dev/null
+++ b/sdk/go/gcp/kms/getCryptoKeyVersions.go
@@ -0,0 +1,167 @@
+// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT.
+// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! ***
+
+package kms
+
+import (
+	"context"
+	"reflect"
+
+	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/internal"
+	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+)
+
+// Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see
+// [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)
+// and
+// [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).
+//
+// ## Example Usage
+//
+// ```go
+// package main
+//
+// import (
+//
+//	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
+//	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+//
+// )
+//
+// func main() {
+//	pulumi.Run(func(ctx *pulumi.Context) error {
+//		myKeyRing, err := kms.GetKMSKeyRing(ctx, &kms.GetKMSKeyRingArgs{
+//			Name:     "my-key-ring",
+//			Location: "us-central1",
+//		}, nil)
+//		if err != nil {
+//			return err
+//		}
+//		myKey, err := kms.GetKMSCryptoKey(ctx, &kms.GetKMSCryptoKeyArgs{
+//			Name:    "my-crypto-key",
+//			KeyRing: myKeyRing.Id,
+//		}, nil)
+//		if err != nil {
+//			return err
+//		}
+//		_, err = kms.GetCryptoKeyVersions(ctx, &kms.GetCryptoKeyVersionsArgs{
+//			CryptoKey: myKey.Id,
+//		}, nil)
+//		if err != nil {
+//			return err
+//		}
+//		return nil
+//	})
+// }
+//
+// ```
+func GetCryptoKeyVersions(ctx *pulumi.Context, args *GetCryptoKeyVersionsArgs, opts ...pulumi.InvokeOption) (*GetCryptoKeyVersionsResult, error) {
+	opts = internal.PkgInvokeDefaultOpts(opts)
+	var rv GetCryptoKeyVersionsResult
+	err := ctx.Invoke("gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions", args, &rv, opts...)
+ if err != nil { + return nil, err + } + return &rv, nil +} + +// A collection of arguments for invoking getCryptoKeyVersions. +type GetCryptoKeyVersionsArgs struct { + // The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + // `kms.CryptoKey` resource/datasource. + CryptoKey string `pulumi:"cryptoKey"` + // The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + // + // Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + // + // * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + // * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + // + // [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + Filter *string `pulumi:"filter"` +} + +// A collection of values returned by getCryptoKeyVersions. +type GetCryptoKeyVersionsResult struct { + CryptoKey string `pulumi:"cryptoKey"` + Filter *string `pulumi:"filter"` + // The provider-assigned unique ID for this managed resource. + Id string `pulumi:"id"` + PublicKeys []GetCryptoKeyVersionsPublicKey `pulumi:"publicKeys"` + // A list of all the retrieved crypto key versions from the provided crypto key. This list is influenced by the provided filter argument. + Versions []GetCryptoKeyVersionsVersion `pulumi:"versions"` +} + +func GetCryptoKeyVersionsOutput(ctx *pulumi.Context, args GetCryptoKeyVersionsOutputArgs, opts ...pulumi.InvokeOption) GetCryptoKeyVersionsResultOutput { + return pulumi.ToOutputWithContext(context.Background(), args). + ApplyT(func(v interface{}) (GetCryptoKeyVersionsResult, error) { + args := v.(GetCryptoKeyVersionsArgs) + r, err := GetCryptoKeyVersions(ctx, &args, opts...) + var s GetCryptoKeyVersionsResult + if r != nil { + s = *r + } + return s, err + }).(GetCryptoKeyVersionsResultOutput) +} + +// A collection of arguments for invoking getCryptoKeyVersions. +type GetCryptoKeyVersionsOutputArgs struct { + // The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + // `kms.CryptoKey` resource/datasource. + CryptoKey pulumi.StringInput `pulumi:"cryptoKey"` + // The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + // + // Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + // + // * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + // * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. 
+ // + // [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + Filter pulumi.StringPtrInput `pulumi:"filter"` +} + +func (GetCryptoKeyVersionsOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyVersionsArgs)(nil)).Elem() +} + +// A collection of values returned by getCryptoKeyVersions. +type GetCryptoKeyVersionsResultOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyVersionsResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyVersionsResult)(nil)).Elem() +} + +func (o GetCryptoKeyVersionsResultOutput) ToGetCryptoKeyVersionsResultOutput() GetCryptoKeyVersionsResultOutput { + return o +} + +func (o GetCryptoKeyVersionsResultOutput) ToGetCryptoKeyVersionsResultOutputWithContext(ctx context.Context) GetCryptoKeyVersionsResultOutput { + return o +} + +func (o GetCryptoKeyVersionsResultOutput) CryptoKey() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsResult) string { return v.CryptoKey }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyVersionsResultOutput) Filter() pulumi.StringPtrOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsResult) *string { return v.Filter }).(pulumi.StringPtrOutput) +} + +// The provider-assigned unique ID for this managed resource. +func (o GetCryptoKeyVersionsResultOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsResult) string { return v.Id }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyVersionsResultOutput) PublicKeys() GetCryptoKeyVersionsPublicKeyArrayOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsResult) []GetCryptoKeyVersionsPublicKey { return v.PublicKeys }).(GetCryptoKeyVersionsPublicKeyArrayOutput) +} + +// A list of all the retrieved crypto key versions from the provided crypto key. This list is influenced by the provided filter argument. +func (o GetCryptoKeyVersionsResultOutput) Versions() GetCryptoKeyVersionsVersionArrayOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsResult) []GetCryptoKeyVersionsVersion { return v.Versions }).(GetCryptoKeyVersionsVersionArrayOutput) +} + +func init() { + pulumi.RegisterOutputType(GetCryptoKeyVersionsResultOutput{}) +} diff --git a/sdk/go/gcp/kms/pulumiTypes.go b/sdk/go/gcp/kms/pulumiTypes.go index 6bb0f5be12..9e28d11408 100644 --- a/sdk/go/gcp/kms/pulumiTypes.go +++ b/sdk/go/gcp/kms/pulumiTypes.go @@ -2718,6 +2718,469 @@ func (o KeyRingImportJobPublicKeyArrayOutput) Index(i pulumi.IntInput) KeyRingIm }).(KeyRingImportJobPublicKeyOutput) } +type GetCryptoKeyLatestVersionPublicKey struct { + // The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + Algorithm string `pulumi:"algorithm"` + // The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + Pem string `pulumi:"pem"` +} + +// GetCryptoKeyLatestVersionPublicKeyInput is an input type that accepts GetCryptoKeyLatestVersionPublicKeyArgs and GetCryptoKeyLatestVersionPublicKeyOutput values. 
+// You can construct a concrete instance of `GetCryptoKeyLatestVersionPublicKeyInput` via: +// +// GetCryptoKeyLatestVersionPublicKeyArgs{...} +type GetCryptoKeyLatestVersionPublicKeyInput interface { + pulumi.Input + + ToGetCryptoKeyLatestVersionPublicKeyOutput() GetCryptoKeyLatestVersionPublicKeyOutput + ToGetCryptoKeyLatestVersionPublicKeyOutputWithContext(context.Context) GetCryptoKeyLatestVersionPublicKeyOutput +} + +type GetCryptoKeyLatestVersionPublicKeyArgs struct { + // The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + Algorithm pulumi.StringInput `pulumi:"algorithm"` + // The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + Pem pulumi.StringInput `pulumi:"pem"` +} + +func (GetCryptoKeyLatestVersionPublicKeyArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyLatestVersionPublicKey)(nil)).Elem() +} + +func (i GetCryptoKeyLatestVersionPublicKeyArgs) ToGetCryptoKeyLatestVersionPublicKeyOutput() GetCryptoKeyLatestVersionPublicKeyOutput { + return i.ToGetCryptoKeyLatestVersionPublicKeyOutputWithContext(context.Background()) +} + +func (i GetCryptoKeyLatestVersionPublicKeyArgs) ToGetCryptoKeyLatestVersionPublicKeyOutputWithContext(ctx context.Context) GetCryptoKeyLatestVersionPublicKeyOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCryptoKeyLatestVersionPublicKeyOutput) +} + +// GetCryptoKeyLatestVersionPublicKeyArrayInput is an input type that accepts GetCryptoKeyLatestVersionPublicKeyArray and GetCryptoKeyLatestVersionPublicKeyArrayOutput values. +// You can construct a concrete instance of `GetCryptoKeyLatestVersionPublicKeyArrayInput` via: +// +// GetCryptoKeyLatestVersionPublicKeyArray{ GetCryptoKeyLatestVersionPublicKeyArgs{...} } +type GetCryptoKeyLatestVersionPublicKeyArrayInput interface { + pulumi.Input + + ToGetCryptoKeyLatestVersionPublicKeyArrayOutput() GetCryptoKeyLatestVersionPublicKeyArrayOutput + ToGetCryptoKeyLatestVersionPublicKeyArrayOutputWithContext(context.Context) GetCryptoKeyLatestVersionPublicKeyArrayOutput +} + +type GetCryptoKeyLatestVersionPublicKeyArray []GetCryptoKeyLatestVersionPublicKeyInput + +func (GetCryptoKeyLatestVersionPublicKeyArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCryptoKeyLatestVersionPublicKey)(nil)).Elem() +} + +func (i GetCryptoKeyLatestVersionPublicKeyArray) ToGetCryptoKeyLatestVersionPublicKeyArrayOutput() GetCryptoKeyLatestVersionPublicKeyArrayOutput { + return i.ToGetCryptoKeyLatestVersionPublicKeyArrayOutputWithContext(context.Background()) +} + +func (i GetCryptoKeyLatestVersionPublicKeyArray) ToGetCryptoKeyLatestVersionPublicKeyArrayOutputWithContext(ctx context.Context) GetCryptoKeyLatestVersionPublicKeyArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCryptoKeyLatestVersionPublicKeyArrayOutput) +} + +type GetCryptoKeyLatestVersionPublicKeyOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyLatestVersionPublicKeyOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyLatestVersionPublicKey)(nil)).Elem() +} + +func (o GetCryptoKeyLatestVersionPublicKeyOutput) ToGetCryptoKeyLatestVersionPublicKeyOutput() GetCryptoKeyLatestVersionPublicKeyOutput { + return o +} + +func (o GetCryptoKeyLatestVersionPublicKeyOutput) ToGetCryptoKeyLatestVersionPublicKeyOutputWithContext(ctx context.Context) GetCryptoKeyLatestVersionPublicKeyOutput { + return o +} + +// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. 
+func (o GetCryptoKeyLatestVersionPublicKeyOutput) Algorithm() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyLatestVersionPublicKey) string { return v.Algorithm }).(pulumi.StringOutput) +} + +// The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. +func (o GetCryptoKeyLatestVersionPublicKeyOutput) Pem() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyLatestVersionPublicKey) string { return v.Pem }).(pulumi.StringOutput) +} + +type GetCryptoKeyLatestVersionPublicKeyArrayOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyLatestVersionPublicKeyArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCryptoKeyLatestVersionPublicKey)(nil)).Elem() +} + +func (o GetCryptoKeyLatestVersionPublicKeyArrayOutput) ToGetCryptoKeyLatestVersionPublicKeyArrayOutput() GetCryptoKeyLatestVersionPublicKeyArrayOutput { + return o +} + +func (o GetCryptoKeyLatestVersionPublicKeyArrayOutput) ToGetCryptoKeyLatestVersionPublicKeyArrayOutputWithContext(ctx context.Context) GetCryptoKeyLatestVersionPublicKeyArrayOutput { + return o +} + +func (o GetCryptoKeyLatestVersionPublicKeyArrayOutput) Index(i pulumi.IntInput) GetCryptoKeyLatestVersionPublicKeyOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetCryptoKeyLatestVersionPublicKey { + return vs[0].([]GetCryptoKeyLatestVersionPublicKey)[vs[1].(int)] + }).(GetCryptoKeyLatestVersionPublicKeyOutput) +} + +type GetCryptoKeyVersionsPublicKey struct { + // The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + Algorithm string `pulumi:"algorithm"` + // The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + Pem string `pulumi:"pem"` +} + +// GetCryptoKeyVersionsPublicKeyInput is an input type that accepts GetCryptoKeyVersionsPublicKeyArgs and GetCryptoKeyVersionsPublicKeyOutput values. +// You can construct a concrete instance of `GetCryptoKeyVersionsPublicKeyInput` via: +// +// GetCryptoKeyVersionsPublicKeyArgs{...} +type GetCryptoKeyVersionsPublicKeyInput interface { + pulumi.Input + + ToGetCryptoKeyVersionsPublicKeyOutput() GetCryptoKeyVersionsPublicKeyOutput + ToGetCryptoKeyVersionsPublicKeyOutputWithContext(context.Context) GetCryptoKeyVersionsPublicKeyOutput +} + +type GetCryptoKeyVersionsPublicKeyArgs struct { + // The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + Algorithm pulumi.StringInput `pulumi:"algorithm"` + // The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. 
+ Pem pulumi.StringInput `pulumi:"pem"` +} + +func (GetCryptoKeyVersionsPublicKeyArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyVersionsPublicKey)(nil)).Elem() +} + +func (i GetCryptoKeyVersionsPublicKeyArgs) ToGetCryptoKeyVersionsPublicKeyOutput() GetCryptoKeyVersionsPublicKeyOutput { + return i.ToGetCryptoKeyVersionsPublicKeyOutputWithContext(context.Background()) +} + +func (i GetCryptoKeyVersionsPublicKeyArgs) ToGetCryptoKeyVersionsPublicKeyOutputWithContext(ctx context.Context) GetCryptoKeyVersionsPublicKeyOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCryptoKeyVersionsPublicKeyOutput) +} + +// GetCryptoKeyVersionsPublicKeyArrayInput is an input type that accepts GetCryptoKeyVersionsPublicKeyArray and GetCryptoKeyVersionsPublicKeyArrayOutput values. +// You can construct a concrete instance of `GetCryptoKeyVersionsPublicKeyArrayInput` via: +// +// GetCryptoKeyVersionsPublicKeyArray{ GetCryptoKeyVersionsPublicKeyArgs{...} } +type GetCryptoKeyVersionsPublicKeyArrayInput interface { + pulumi.Input + + ToGetCryptoKeyVersionsPublicKeyArrayOutput() GetCryptoKeyVersionsPublicKeyArrayOutput + ToGetCryptoKeyVersionsPublicKeyArrayOutputWithContext(context.Context) GetCryptoKeyVersionsPublicKeyArrayOutput +} + +type GetCryptoKeyVersionsPublicKeyArray []GetCryptoKeyVersionsPublicKeyInput + +func (GetCryptoKeyVersionsPublicKeyArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCryptoKeyVersionsPublicKey)(nil)).Elem() +} + +func (i GetCryptoKeyVersionsPublicKeyArray) ToGetCryptoKeyVersionsPublicKeyArrayOutput() GetCryptoKeyVersionsPublicKeyArrayOutput { + return i.ToGetCryptoKeyVersionsPublicKeyArrayOutputWithContext(context.Background()) +} + +func (i GetCryptoKeyVersionsPublicKeyArray) ToGetCryptoKeyVersionsPublicKeyArrayOutputWithContext(ctx context.Context) GetCryptoKeyVersionsPublicKeyArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCryptoKeyVersionsPublicKeyArrayOutput) +} + +type GetCryptoKeyVersionsPublicKeyOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyVersionsPublicKeyOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyVersionsPublicKey)(nil)).Elem() +} + +func (o GetCryptoKeyVersionsPublicKeyOutput) ToGetCryptoKeyVersionsPublicKeyOutput() GetCryptoKeyVersionsPublicKeyOutput { + return o +} + +func (o GetCryptoKeyVersionsPublicKeyOutput) ToGetCryptoKeyVersionsPublicKeyOutputWithContext(ctx context.Context) GetCryptoKeyVersionsPublicKeyOutput { + return o +} + +// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. +func (o GetCryptoKeyVersionsPublicKeyOutput) Algorithm() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsPublicKey) string { return v.Algorithm }).(pulumi.StringOutput) +} + +// The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. 
+func (o GetCryptoKeyVersionsPublicKeyOutput) Pem() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsPublicKey) string { return v.Pem }).(pulumi.StringOutput) +} + +type GetCryptoKeyVersionsPublicKeyArrayOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyVersionsPublicKeyArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCryptoKeyVersionsPublicKey)(nil)).Elem() +} + +func (o GetCryptoKeyVersionsPublicKeyArrayOutput) ToGetCryptoKeyVersionsPublicKeyArrayOutput() GetCryptoKeyVersionsPublicKeyArrayOutput { + return o +} + +func (o GetCryptoKeyVersionsPublicKeyArrayOutput) ToGetCryptoKeyVersionsPublicKeyArrayOutputWithContext(ctx context.Context) GetCryptoKeyVersionsPublicKeyArrayOutput { + return o +} + +func (o GetCryptoKeyVersionsPublicKeyArrayOutput) Index(i pulumi.IntInput) GetCryptoKeyVersionsPublicKeyOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetCryptoKeyVersionsPublicKey { + return vs[0].([]GetCryptoKeyVersionsPublicKey)[vs[1].(int)] + }).(GetCryptoKeyVersionsPublicKeyOutput) +} + +type GetCryptoKeyVersionsVersion struct { + // The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + Algorithm string `pulumi:"algorithm"` + // The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + // `kms.CryptoKey` resource/datasource. + CryptoKey string `pulumi:"cryptoKey"` + Id string `pulumi:"id"` + Name string `pulumi:"name"` + ProtectionLevel string `pulumi:"protectionLevel"` + PublicKeys []GetCryptoKeyVersionsVersionPublicKey `pulumi:"publicKeys"` + State string `pulumi:"state"` + Version int `pulumi:"version"` +} + +// GetCryptoKeyVersionsVersionInput is an input type that accepts GetCryptoKeyVersionsVersionArgs and GetCryptoKeyVersionsVersionOutput values. +// You can construct a concrete instance of `GetCryptoKeyVersionsVersionInput` via: +// +// GetCryptoKeyVersionsVersionArgs{...} +type GetCryptoKeyVersionsVersionInput interface { + pulumi.Input + + ToGetCryptoKeyVersionsVersionOutput() GetCryptoKeyVersionsVersionOutput + ToGetCryptoKeyVersionsVersionOutputWithContext(context.Context) GetCryptoKeyVersionsVersionOutput +} + +type GetCryptoKeyVersionsVersionArgs struct { + // The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + Algorithm pulumi.StringInput `pulumi:"algorithm"` + // The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + // `kms.CryptoKey` resource/datasource. 
+ CryptoKey pulumi.StringInput `pulumi:"cryptoKey"` + Id pulumi.StringInput `pulumi:"id"` + Name pulumi.StringInput `pulumi:"name"` + ProtectionLevel pulumi.StringInput `pulumi:"protectionLevel"` + PublicKeys GetCryptoKeyVersionsVersionPublicKeyArrayInput `pulumi:"publicKeys"` + State pulumi.StringInput `pulumi:"state"` + Version pulumi.IntInput `pulumi:"version"` +} + +func (GetCryptoKeyVersionsVersionArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyVersionsVersion)(nil)).Elem() +} + +func (i GetCryptoKeyVersionsVersionArgs) ToGetCryptoKeyVersionsVersionOutput() GetCryptoKeyVersionsVersionOutput { + return i.ToGetCryptoKeyVersionsVersionOutputWithContext(context.Background()) +} + +func (i GetCryptoKeyVersionsVersionArgs) ToGetCryptoKeyVersionsVersionOutputWithContext(ctx context.Context) GetCryptoKeyVersionsVersionOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCryptoKeyVersionsVersionOutput) +} + +// GetCryptoKeyVersionsVersionArrayInput is an input type that accepts GetCryptoKeyVersionsVersionArray and GetCryptoKeyVersionsVersionArrayOutput values. +// You can construct a concrete instance of `GetCryptoKeyVersionsVersionArrayInput` via: +// +// GetCryptoKeyVersionsVersionArray{ GetCryptoKeyVersionsVersionArgs{...} } +type GetCryptoKeyVersionsVersionArrayInput interface { + pulumi.Input + + ToGetCryptoKeyVersionsVersionArrayOutput() GetCryptoKeyVersionsVersionArrayOutput + ToGetCryptoKeyVersionsVersionArrayOutputWithContext(context.Context) GetCryptoKeyVersionsVersionArrayOutput +} + +type GetCryptoKeyVersionsVersionArray []GetCryptoKeyVersionsVersionInput + +func (GetCryptoKeyVersionsVersionArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCryptoKeyVersionsVersion)(nil)).Elem() +} + +func (i GetCryptoKeyVersionsVersionArray) ToGetCryptoKeyVersionsVersionArrayOutput() GetCryptoKeyVersionsVersionArrayOutput { + return i.ToGetCryptoKeyVersionsVersionArrayOutputWithContext(context.Background()) +} + +func (i GetCryptoKeyVersionsVersionArray) ToGetCryptoKeyVersionsVersionArrayOutputWithContext(ctx context.Context) GetCryptoKeyVersionsVersionArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCryptoKeyVersionsVersionArrayOutput) +} + +type GetCryptoKeyVersionsVersionOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyVersionsVersionOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyVersionsVersion)(nil)).Elem() +} + +func (o GetCryptoKeyVersionsVersionOutput) ToGetCryptoKeyVersionsVersionOutput() GetCryptoKeyVersionsVersionOutput { + return o +} + +func (o GetCryptoKeyVersionsVersionOutput) ToGetCryptoKeyVersionsVersionOutputWithContext(ctx context.Context) GetCryptoKeyVersionsVersionOutput { + return o +} + +// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. +func (o GetCryptoKeyVersionsVersionOutput) Algorithm() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersion) string { return v.Algorithm }).(pulumi.StringOutput) +} + +// The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the +// `kms.CryptoKey` resource/datasource. 
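+//
+// A brief sketch (editorial addition; the key id is hypothetical) of listing version
+// names from the plural data source with the Output-form invoke:
+//
+// ```go
+// versions := kms.GetCryptoKeyVersionsOutput(ctx, kms.GetCryptoKeyVersionsOutputArgs{
+//	CryptoKey: pulumi.String("projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key"),
+// })
+// names := versions.Versions().ApplyT(func(vs []kms.GetCryptoKeyVersionsVersion) []string {
+//	out := make([]string, 0, len(vs))
+//	for _, v := range vs {
+//		out = append(out, v.Name)
+//	}
+//	return out
+// }).(pulumi.StringArrayOutput)
+// ctx.Export("versionNames", names)
+// ```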
+func (o GetCryptoKeyVersionsVersionOutput) CryptoKey() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersion) string { return v.CryptoKey }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyVersionsVersionOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersion) string { return v.Id }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyVersionsVersionOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersion) string { return v.Name }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyVersionsVersionOutput) ProtectionLevel() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersion) string { return v.ProtectionLevel }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyVersionsVersionOutput) PublicKeys() GetCryptoKeyVersionsVersionPublicKeyArrayOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersion) []GetCryptoKeyVersionsVersionPublicKey { return v.PublicKeys }).(GetCryptoKeyVersionsVersionPublicKeyArrayOutput) +} + +func (o GetCryptoKeyVersionsVersionOutput) State() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersion) string { return v.State }).(pulumi.StringOutput) +} + +func (o GetCryptoKeyVersionsVersionOutput) Version() pulumi.IntOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersion) int { return v.Version }).(pulumi.IntOutput) +} + +type GetCryptoKeyVersionsVersionArrayOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyVersionsVersionArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCryptoKeyVersionsVersion)(nil)).Elem() +} + +func (o GetCryptoKeyVersionsVersionArrayOutput) ToGetCryptoKeyVersionsVersionArrayOutput() GetCryptoKeyVersionsVersionArrayOutput { + return o +} + +func (o GetCryptoKeyVersionsVersionArrayOutput) ToGetCryptoKeyVersionsVersionArrayOutputWithContext(ctx context.Context) GetCryptoKeyVersionsVersionArrayOutput { + return o +} + +func (o GetCryptoKeyVersionsVersionArrayOutput) Index(i pulumi.IntInput) GetCryptoKeyVersionsVersionOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetCryptoKeyVersionsVersion { + return vs[0].([]GetCryptoKeyVersionsVersion)[vs[1].(int)] + }).(GetCryptoKeyVersionsVersionOutput) +} + +type GetCryptoKeyVersionsVersionPublicKey struct { + // The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + Algorithm string `pulumi:"algorithm"` + // The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + Pem string `pulumi:"pem"` +} + +// GetCryptoKeyVersionsVersionPublicKeyInput is an input type that accepts GetCryptoKeyVersionsVersionPublicKeyArgs and GetCryptoKeyVersionsVersionPublicKeyOutput values. +// You can construct a concrete instance of `GetCryptoKeyVersionsVersionPublicKeyInput` via: +// +// GetCryptoKeyVersionsVersionPublicKeyArgs{...} +type GetCryptoKeyVersionsVersionPublicKeyInput interface { + pulumi.Input + + ToGetCryptoKeyVersionsVersionPublicKeyOutput() GetCryptoKeyVersionsVersionPublicKeyOutput + ToGetCryptoKeyVersionsVersionPublicKeyOutputWithContext(context.Context) GetCryptoKeyVersionsVersionPublicKeyOutput +} + +type GetCryptoKeyVersionsVersionPublicKeyArgs struct { + // The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + Algorithm pulumi.StringInput `pulumi:"algorithm"` + // The public key, encoded in PEM format. 
For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + Pem pulumi.StringInput `pulumi:"pem"` +} + +func (GetCryptoKeyVersionsVersionPublicKeyArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyVersionsVersionPublicKey)(nil)).Elem() +} + +func (i GetCryptoKeyVersionsVersionPublicKeyArgs) ToGetCryptoKeyVersionsVersionPublicKeyOutput() GetCryptoKeyVersionsVersionPublicKeyOutput { + return i.ToGetCryptoKeyVersionsVersionPublicKeyOutputWithContext(context.Background()) +} + +func (i GetCryptoKeyVersionsVersionPublicKeyArgs) ToGetCryptoKeyVersionsVersionPublicKeyOutputWithContext(ctx context.Context) GetCryptoKeyVersionsVersionPublicKeyOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCryptoKeyVersionsVersionPublicKeyOutput) +} + +// GetCryptoKeyVersionsVersionPublicKeyArrayInput is an input type that accepts GetCryptoKeyVersionsVersionPublicKeyArray and GetCryptoKeyVersionsVersionPublicKeyArrayOutput values. +// You can construct a concrete instance of `GetCryptoKeyVersionsVersionPublicKeyArrayInput` via: +// +// GetCryptoKeyVersionsVersionPublicKeyArray{ GetCryptoKeyVersionsVersionPublicKeyArgs{...} } +type GetCryptoKeyVersionsVersionPublicKeyArrayInput interface { + pulumi.Input + + ToGetCryptoKeyVersionsVersionPublicKeyArrayOutput() GetCryptoKeyVersionsVersionPublicKeyArrayOutput + ToGetCryptoKeyVersionsVersionPublicKeyArrayOutputWithContext(context.Context) GetCryptoKeyVersionsVersionPublicKeyArrayOutput +} + +type GetCryptoKeyVersionsVersionPublicKeyArray []GetCryptoKeyVersionsVersionPublicKeyInput + +func (GetCryptoKeyVersionsVersionPublicKeyArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCryptoKeyVersionsVersionPublicKey)(nil)).Elem() +} + +func (i GetCryptoKeyVersionsVersionPublicKeyArray) ToGetCryptoKeyVersionsVersionPublicKeyArrayOutput() GetCryptoKeyVersionsVersionPublicKeyArrayOutput { + return i.ToGetCryptoKeyVersionsVersionPublicKeyArrayOutputWithContext(context.Background()) +} + +func (i GetCryptoKeyVersionsVersionPublicKeyArray) ToGetCryptoKeyVersionsVersionPublicKeyArrayOutputWithContext(ctx context.Context) GetCryptoKeyVersionsVersionPublicKeyArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetCryptoKeyVersionsVersionPublicKeyArrayOutput) +} + +type GetCryptoKeyVersionsVersionPublicKeyOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyVersionsVersionPublicKeyOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetCryptoKeyVersionsVersionPublicKey)(nil)).Elem() +} + +func (o GetCryptoKeyVersionsVersionPublicKeyOutput) ToGetCryptoKeyVersionsVersionPublicKeyOutput() GetCryptoKeyVersionsVersionPublicKeyOutput { + return o +} + +func (o GetCryptoKeyVersionsVersionPublicKeyOutput) ToGetCryptoKeyVersionsVersionPublicKeyOutputWithContext(ctx context.Context) GetCryptoKeyVersionsVersionPublicKeyOutput { + return o +} + +// The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. +func (o GetCryptoKeyVersionsVersionPublicKeyOutput) Algorithm() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersionPublicKey) string { return v.Algorithm }).(pulumi.StringOutput) +} + +// The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. 
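+//
+// A short sketch (editorial addition) of decoding the PEM once resolved, assuming the
+// standard `crypto/x509`, `encoding/pem`, and `fmt` imports; `pubKey` and `ctx` are
+// hypothetical:
+//
+// ```go
+// ctx.Export("publicKeyType", pubKey.Pem().ApplyT(func(p string) (string, error) {
+//	block, _ := pem.Decode([]byte(p))
+//	if block == nil {
+//		return "", fmt.Errorf("no PEM block found")
+//	}
+//	parsed, err := x509.ParsePKIXPublicKey(block.Bytes)
+//	if err != nil {
+//		return "", err
+//	}
+//	return fmt.Sprintf("%T", parsed), nil
+// }))
+// ```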
+func (o GetCryptoKeyVersionsVersionPublicKeyOutput) Pem() pulumi.StringOutput { + return o.ApplyT(func(v GetCryptoKeyVersionsVersionPublicKey) string { return v.Pem }).(pulumi.StringOutput) +} + +type GetCryptoKeyVersionsVersionPublicKeyArrayOutput struct{ *pulumi.OutputState } + +func (GetCryptoKeyVersionsVersionPublicKeyArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetCryptoKeyVersionsVersionPublicKey)(nil)).Elem() +} + +func (o GetCryptoKeyVersionsVersionPublicKeyArrayOutput) ToGetCryptoKeyVersionsVersionPublicKeyArrayOutput() GetCryptoKeyVersionsVersionPublicKeyArrayOutput { + return o +} + +func (o GetCryptoKeyVersionsVersionPublicKeyArrayOutput) ToGetCryptoKeyVersionsVersionPublicKeyArrayOutputWithContext(ctx context.Context) GetCryptoKeyVersionsVersionPublicKeyArrayOutput { + return o +} + +func (o GetCryptoKeyVersionsVersionPublicKeyArrayOutput) Index(i pulumi.IntInput) GetCryptoKeyVersionsVersionPublicKeyOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetCryptoKeyVersionsVersionPublicKey { + return vs[0].([]GetCryptoKeyVersionsVersionPublicKey)[vs[1].(int)] + }).(GetCryptoKeyVersionsVersionPublicKeyOutput) +} + type GetCryptoKeysKey struct { // The resource name of the backend environment associated with all CryptoKeyVersions within this CryptoKey. // The resource name is in the format "projects/*/locations/*/ekmConnections/*" and only applies to "EXTERNAL_VPC" keys. @@ -3884,6 +4347,14 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*KeyRingImportJobAttestationArrayInput)(nil)).Elem(), KeyRingImportJobAttestationArray{}) pulumi.RegisterInputType(reflect.TypeOf((*KeyRingImportJobPublicKeyInput)(nil)).Elem(), KeyRingImportJobPublicKeyArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*KeyRingImportJobPublicKeyArrayInput)(nil)).Elem(), KeyRingImportJobPublicKeyArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeyLatestVersionPublicKeyInput)(nil)).Elem(), GetCryptoKeyLatestVersionPublicKeyArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeyLatestVersionPublicKeyArrayInput)(nil)).Elem(), GetCryptoKeyLatestVersionPublicKeyArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeyVersionsPublicKeyInput)(nil)).Elem(), GetCryptoKeyVersionsPublicKeyArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeyVersionsPublicKeyArrayInput)(nil)).Elem(), GetCryptoKeyVersionsPublicKeyArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeyVersionsVersionInput)(nil)).Elem(), GetCryptoKeyVersionsVersionArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeyVersionsVersionArrayInput)(nil)).Elem(), GetCryptoKeyVersionsVersionArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeyVersionsVersionPublicKeyInput)(nil)).Elem(), GetCryptoKeyVersionsVersionPublicKeyArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeyVersionsVersionPublicKeyArrayInput)(nil)).Elem(), GetCryptoKeyVersionsVersionPublicKeyArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeysKeyInput)(nil)).Elem(), GetCryptoKeysKeyArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeysKeyArrayInput)(nil)).Elem(), GetCryptoKeysKeyArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetCryptoKeysKeyKeyAccessJustificationsPolicyInput)(nil)).Elem(), GetCryptoKeysKeyKeyAccessJustificationsPolicyArgs{}) @@ -3936,6 +4407,14 @@ func init() { pulumi.RegisterOutputType(KeyRingImportJobAttestationArrayOutput{}) pulumi.RegisterOutputType(KeyRingImportJobPublicKeyOutput{}) 
pulumi.RegisterOutputType(KeyRingImportJobPublicKeyArrayOutput{}) + pulumi.RegisterOutputType(GetCryptoKeyLatestVersionPublicKeyOutput{}) + pulumi.RegisterOutputType(GetCryptoKeyLatestVersionPublicKeyArrayOutput{}) + pulumi.RegisterOutputType(GetCryptoKeyVersionsPublicKeyOutput{}) + pulumi.RegisterOutputType(GetCryptoKeyVersionsPublicKeyArrayOutput{}) + pulumi.RegisterOutputType(GetCryptoKeyVersionsVersionOutput{}) + pulumi.RegisterOutputType(GetCryptoKeyVersionsVersionArrayOutput{}) + pulumi.RegisterOutputType(GetCryptoKeyVersionsVersionPublicKeyOutput{}) + pulumi.RegisterOutputType(GetCryptoKeyVersionsVersionPublicKeyArrayOutput{}) pulumi.RegisterOutputType(GetCryptoKeysKeyOutput{}) pulumi.RegisterOutputType(GetCryptoKeysKeyArrayOutput{}) pulumi.RegisterOutputType(GetCryptoKeysKeyKeyAccessJustificationsPolicyOutput{}) diff --git a/sdk/go/gcp/netapp/activeDirectory.go b/sdk/go/gcp/netapp/activeDirectory.go index 2ff1c98449..7e995a8b31 100644 --- a/sdk/go/gcp/netapp/activeDirectory.go +++ b/sdk/go/gcp/netapp/activeDirectory.go @@ -14,7 +14,7 @@ import ( // ActiveDirectory is the public representation of the active directory config. // -// To get more information about activeDirectory, see: +// To get more information about ActiveDirectory, see: // // * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.activeDirectories) // * How-to Guides @@ -80,7 +80,7 @@ import ( // // ## Import // -// activeDirectory can be imported using any of these accepted formats: +// ActiveDirectory can be imported using any of these accepted formats: // // * `projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}` // @@ -88,7 +88,7 @@ import ( // // * `{{location}}/{{name}}` // -// When using the `pulumi import` command, activeDirectory can be imported using one of the formats above. For example: +// When using the `pulumi import` command, ActiveDirectory can be imported using one of the formats above. For example: // // ```sh // $ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default projects/{{project}}/locations/{{location}}/activeDirectories/{{name}} diff --git a/sdk/go/gcp/netapp/backup.go b/sdk/go/gcp/netapp/backup.go index c321b42b00..a022b98ad2 100644 --- a/sdk/go/gcp/netapp/backup.go +++ b/sdk/go/gcp/netapp/backup.go @@ -27,7 +27,7 @@ import ( // from a volume or from an existing volume snapshot. Scheduled backups // require a backup policy. // -// To get more information about backup, see: +// To get more information about Backup, see: // // * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults.backups) // * How-to Guides @@ -107,7 +107,7 @@ import ( // // ## Import // -// backup can be imported using any of these accepted formats: +// Backup can be imported using any of these accepted formats: // // * `projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}` // @@ -115,7 +115,7 @@ import ( // // * `{{location}}/{{vault_name}}/{{name}}` // -// When using the `pulumi import` command, backup can be imported using one of the formats above. For example: +// When using the `pulumi import` command, Backup can be imported using one of the formats above. 
For example: // // ```sh // $ pulumi import gcp:netapp/backup:Backup default projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}} diff --git a/sdk/go/gcp/netapp/backupPolicy.go b/sdk/go/gcp/netapp/backupPolicy.go index 3af430a33c..3e3e09a86c 100644 --- a/sdk/go/gcp/netapp/backupPolicy.go +++ b/sdk/go/gcp/netapp/backupPolicy.go @@ -16,7 +16,7 @@ import ( // Backup policies allow you to attach a backup schedule to a volume. // The policy defines how many backups to retain at daily, weekly, or monthly intervals. // -// To get more information about backupPolicy, see: +// To get more information about BackupPolicy, see: // // * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupPolicies) // * How-to Guides @@ -61,7 +61,7 @@ import ( // // ## Import // -// backupPolicy can be imported using any of these accepted formats: +// BackupPolicy can be imported using any of these accepted formats: // // * `projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}` // @@ -69,7 +69,7 @@ import ( // // * `{{location}}/{{name}}` // -// When using the `pulumi import` command, backupPolicy can be imported using one of the formats above. For example: +// When using the `pulumi import` command, BackupPolicy can be imported using one of the formats above. For example: // // ```sh // $ pulumi import gcp:netapp/backupPolicy:BackupPolicy default projects/{{project}}/locations/{{location}}/backupPolicies/{{name}} diff --git a/sdk/go/gcp/netapp/backupVault.go b/sdk/go/gcp/netapp/backupVault.go index ba1d908ce6..d19b2fac13 100644 --- a/sdk/go/gcp/netapp/backupVault.go +++ b/sdk/go/gcp/netapp/backupVault.go @@ -15,7 +15,7 @@ import ( // A backup vault is the location where backups are stored. You can only create one backup vault per region. // A vault can hold multiple backups for multiple volumes in that region. // -// To get more information about backupVault, see: +// To get more information about BackupVault, see: // // * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults) // * How-to Guides @@ -56,7 +56,7 @@ import ( // // ## Import // -// backupVault can be imported using any of these accepted formats: +// BackupVault can be imported using any of these accepted formats: // // * `projects/{{project}}/locations/{{location}}/backupVaults/{{name}}` // @@ -64,7 +64,7 @@ import ( // // * `{{location}}/{{name}}` // -// When using the `pulumi import` command, backupVault can be imported using one of the formats above. For example: +// When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: // // ```sh // $ pulumi import gcp:netapp/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{name}} diff --git a/sdk/go/gcp/netapp/storagePool.go b/sdk/go/gcp/netapp/storagePool.go index 673c7dbdc0..daec2eb501 100644 --- a/sdk/go/gcp/netapp/storagePool.go +++ b/sdk/go/gcp/netapp/storagePool.go @@ -92,7 +92,7 @@ import ( // // ## Import // -// storagePool can be imported using any of these accepted formats: +// StoragePool can be imported using any of these accepted formats: // // * `projects/{{project}}/locations/{{location}}/storagePools/{{name}}` // @@ -100,7 +100,7 @@ import ( // // * `{{location}}/{{name}}` // -// When using the `pulumi import` command, storagePool can be imported using one of the formats above. 
For example: +// When using the `pulumi import` command, StoragePool can be imported using one of the formats above. For example: // // ```sh // $ pulumi import gcp:netapp/storagePool:StoragePool default projects/{{project}}/locations/{{location}}/storagePools/{{name}} diff --git a/sdk/go/gcp/netapp/volume.go b/sdk/go/gcp/netapp/volume.go index 997338752c..ad08b80d2f 100644 --- a/sdk/go/gcp/netapp/volume.go +++ b/sdk/go/gcp/netapp/volume.go @@ -116,6 +116,7 @@ type Volume struct { // Policy to determine if the volume should be deleted forcefully. // Volumes may have nested snapshot resources. Deleting such a volume will fail. // Setting this parameter to FORCE will delete volumes including nested snapshots. + // Possible values: DEFAULT, FORCE. DeletionPolicy pulumi.StringPtrOutput `pulumi:"deletionPolicy"` // An optional description of this resource. Description pulumi.StringPtrOutput `pulumi:"description"` @@ -262,6 +263,7 @@ type volumeState struct { // Policy to determine if the volume should be deleted forcefully. // Volumes may have nested snapshot resources. Deleting such a volume will fail. // Setting this parameter to FORCE will delete volumes including nested snapshots. + // Possible values: DEFAULT, FORCE. DeletionPolicy *string `pulumi:"deletionPolicy"` // An optional description of this resource. Description *string `pulumi:"description"` @@ -359,6 +361,7 @@ type VolumeState struct { // Policy to determine if the volume should be deleted forcefully. // Volumes may have nested snapshot resources. Deleting such a volume will fail. // Setting this parameter to FORCE will delete volumes including nested snapshots. + // Possible values: DEFAULT, FORCE. DeletionPolicy pulumi.StringPtrInput // An optional description of this resource. Description pulumi.StringPtrInput @@ -456,6 +459,7 @@ type volumeArgs struct { // Policy to determine if the volume should be deleted forcefully. // Volumes may have nested snapshot resources. Deleting such a volume will fail. // Setting this parameter to FORCE will delete volumes including nested snapshots. + // Possible values: DEFAULT, FORCE. DeletionPolicy *string `pulumi:"deletionPolicy"` // An optional description of this resource. Description *string `pulumi:"description"` @@ -518,6 +522,7 @@ type VolumeArgs struct { // Policy to determine if the volume should be deleted forcefully. // Volumes may have nested snapshot resources. Deleting such a volume will fail. // Setting this parameter to FORCE will delete volumes including nested snapshots. + // Possible values: DEFAULT, FORCE. DeletionPolicy pulumi.StringPtrInput // An optional description of this resource. Description pulumi.StringPtrInput @@ -681,6 +686,7 @@ func (o VolumeOutput) CreateTime() pulumi.StringOutput { // Policy to determine if the volume should be deleted forcefully. // Volumes may have nested snapshot resources. Deleting such a volume will fail. // Setting this parameter to FORCE will delete volumes including nested snapshots. +// Possible values: DEFAULT, FORCE. 
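+//
+// Editorial sketch of opting into force deletion when declaring a volume; the field
+// names and value types mirror the existing netapp.Volume examples and may need
+// adjusting, and the storage pool reference is hypothetical:
+//
+// ```go
+// _, err := netapp.NewVolume(ctx, "scratch", &netapp.VolumeArgs{
+//	Name:           pulumi.String("scratch-volume"),
+//	Location:       pulumi.String("us-central1"),
+//	CapacityGib:    pulumi.String("100"),
+//	ShareName:      pulumi.String("scratch"),
+//	StoragePool:    defaultStoragePool.Name,
+//	Protocols:      pulumi.StringArray{pulumi.String("NFSV3")},
+//	DeletionPolicy: pulumi.String("FORCE"),
+// })
+// if err != nil {
+//	return err
+// }
+// ```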
func (o VolumeOutput) DeletionPolicy() pulumi.StringPtrOutput { return o.ApplyT(func(v *Volume) pulumi.StringPtrOutput { return v.DeletionPolicy }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/gcp/networkconnectivity/pulumiTypes.go b/sdk/go/gcp/networkconnectivity/pulumiTypes.go index 5accf73f3f..fe2ae6892f 100644 --- a/sdk/go/gcp/networkconnectivity/pulumiTypes.go +++ b/sdk/go/gcp/networkconnectivity/pulumiTypes.go @@ -1839,6 +1839,8 @@ func (o SpokeLinkedRouterApplianceInstancesInstanceArrayOutput) Index(i pulumi.I type SpokeLinkedVpcNetwork struct { // IP ranges encompassing the subnets to be excluded from peering. ExcludeExportRanges []string `pulumi:"excludeExportRanges"` + // IP ranges allowed to be included from peering. + IncludeExportRanges []string `pulumi:"includeExportRanges"` // The URI of the VPC network resource. Uri string `pulumi:"uri"` } @@ -1857,6 +1859,8 @@ type SpokeLinkedVpcNetworkInput interface { type SpokeLinkedVpcNetworkArgs struct { // IP ranges encompassing the subnets to be excluded from peering. ExcludeExportRanges pulumi.StringArrayInput `pulumi:"excludeExportRanges"` + // IP ranges allowed to be included from peering. + IncludeExportRanges pulumi.StringArrayInput `pulumi:"includeExportRanges"` // The URI of the VPC network resource. Uri pulumi.StringInput `pulumi:"uri"` } @@ -1943,6 +1947,11 @@ func (o SpokeLinkedVpcNetworkOutput) ExcludeExportRanges() pulumi.StringArrayOut return o.ApplyT(func(v SpokeLinkedVpcNetwork) []string { return v.ExcludeExportRanges }).(pulumi.StringArrayOutput) } +// IP ranges allowed to be included from peering. +func (o SpokeLinkedVpcNetworkOutput) IncludeExportRanges() pulumi.StringArrayOutput { + return o.ApplyT(func(v SpokeLinkedVpcNetwork) []string { return v.IncludeExportRanges }).(pulumi.StringArrayOutput) +} + // The URI of the VPC network resource. func (o SpokeLinkedVpcNetworkOutput) Uri() pulumi.StringOutput { return o.ApplyT(func(v SpokeLinkedVpcNetwork) string { return v.Uri }).(pulumi.StringOutput) @@ -1982,6 +1991,16 @@ func (o SpokeLinkedVpcNetworkPtrOutput) ExcludeExportRanges() pulumi.StringArray }).(pulumi.StringArrayOutput) } +// IP ranges allowed to be included from peering. +func (o SpokeLinkedVpcNetworkPtrOutput) IncludeExportRanges() pulumi.StringArrayOutput { + return o.ApplyT(func(v *SpokeLinkedVpcNetwork) []string { + if v == nil { + return nil + } + return v.IncludeExportRanges + }).(pulumi.StringArrayOutput) +} + // The URI of the VPC network resource. func (o SpokeLinkedVpcNetworkPtrOutput) Uri() pulumi.StringPtrOutput { return o.ApplyT(func(v *SpokeLinkedVpcNetwork) *string { diff --git a/sdk/go/gcp/networkconnectivity/spoke.go b/sdk/go/gcp/networkconnectivity/spoke.go index b9d4d5dc91..9ce4b90688 100644 --- a/sdk/go/gcp/networkconnectivity/spoke.go +++ b/sdk/go/gcp/networkconnectivity/spoke.go @@ -67,6 +67,10 @@ import ( // pulumi.String("198.51.100.0/24"), // pulumi.String("10.10.0.0/16"), // }, +// IncludeExportRanges: pulumi.StringArray{ +// pulumi.String("198.51.100.0/23"), +// pulumi.String("10.0.0.0/8"), +// }, // Uri: network.SelfLink, // }, // }) diff --git a/sdk/go/gcp/networksecurity/clientTlsPolicy.go b/sdk/go/gcp/networksecurity/clientTlsPolicy.go index baee8312be..0aaa4a9604 100644 --- a/sdk/go/gcp/networksecurity/clientTlsPolicy.go +++ b/sdk/go/gcp/networksecurity/clientTlsPolicy.go @@ -11,6 +11,14 @@ import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) +// ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. 
This resource itself does not affect configuration unless it is attached to a backend service resource.
+//
+// To get more information about ClientTlsPolicy, see:
+//
+// * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.clientTlsPolicies)
+// * How-to Guides
+//   - [Service Security](https://cloud.google.com/traffic-director/docs/security-use-cases)
+//
 // ## Example Usage
 //
 // ### Network Security Client Tls Policy Basic
@@ -74,11 +82,6 @@ import (
 //					TargetUri: pulumi.String("unix:mypath"),
 //				},
 //			},
-//			&networksecurity.ClientTlsPolicyServerValidationCaArgs{
-//				GrpcEndpoint: &networksecurity.ClientTlsPolicyServerValidationCaGrpcEndpointArgs{
-//					TargetUri: pulumi.String("unix:mypath1"),
-//				},
-//			},
 //		},
 //	})
 //	if err != nil {
diff --git a/sdk/go/gcp/networksecurity/serverTlsPolicy.go b/sdk/go/gcp/networksecurity/serverTlsPolicy.go
index 8ef190a165..1ba265cba8 100644
--- a/sdk/go/gcp/networksecurity/serverTlsPolicy.go
+++ b/sdk/go/gcp/networksecurity/serverTlsPolicy.go
@@ -11,6 +11,12 @@ import (
 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 )
 
+// ServerTlsPolicy is a resource that specifies how a server should authenticate incoming requests. This resource itself does not affect configuration unless it is attached to a target HTTPS proxy or endpoint config selector resource.
+//
+// To get more information about ServerTlsPolicy, see:
+//
+// * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.serverTlsPolicies)
+//
 // ## Example Usage
 //
 // ### Network Security Server Tls Policy Basic
@@ -46,16 +52,6 @@ import (
 //					TargetUri: pulumi.String("unix:mypath"),
 //				},
 //			},
-//			&networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaArgs{
-//				GrpcEndpoint: &networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs{
-//					TargetUri: pulumi.String("unix:abc/mypath"),
-//				},
-//			},
-//			&networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaArgs{
-//				CertificateProviderInstance: &networksecurity.ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs{
-//					PluginInstance: pulumi.String("google_cloud_private_spiffe"),
-//				},
-//			},
 //		},
 //	},
 //	})
diff --git a/sdk/go/gcp/organizations/getProject.go b/sdk/go/gcp/organizations/getProject.go
index 172a7c5127..3527b797be 100644
--- a/sdk/go/gcp/organizations/getProject.go
+++ b/sdk/go/gcp/organizations/getProject.go
@@ -71,6 +71,7 @@ type LookupProjectResult struct {
 	OrgId        string            `pulumi:"orgId"`
 	ProjectId    *string           `pulumi:"projectId"`
 	PulumiLabels map[string]string `pulumi:"pulumiLabels"`
+	Tags         map[string]string `pulumi:"tags"`
 }
 
 func LookupProjectOutput(ctx *pulumi.Context, args LookupProjectOutputArgs, opts ...pulumi.InvokeOption) LookupProjectResultOutput {
@@ -161,6 +162,10 @@ func (o LookupProjectResultOutput) PulumiLabels() pulumi.StringMapOutput {
 	return o.ApplyT(func(v LookupProjectResult) map[string]string { return v.PulumiLabels }).(pulumi.StringMapOutput)
 }
 
+func (o LookupProjectResultOutput) Tags() pulumi.StringMapOutput {
+	return o.ApplyT(func(v LookupProjectResult) map[string]string { return v.Tags }).(pulumi.StringMapOutput)
+}
+
 func init() {
 	pulumi.RegisterOutputType(LookupProjectResultOutput{})
 }
diff --git a/sdk/go/gcp/organizations/project.go b/sdk/go/gcp/organizations/project.go
index 9be6e69ccd..bdd5a6ecde 100644
--- a/sdk/go/gcp/organizations/project.go
+++ b/sdk/go/gcp/organizations/project.go
@@ -23,6 +23,10 @@ import (
 //
 // > This
resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. // +// > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `autoCreateNetwork` to false, when possible. +// +// > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. +// // To get more information about projects, see: // // * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -92,6 +96,37 @@ import ( // // ``` // +// # To create a project with a tag +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := organizations.NewProject(ctx, "my_project", &organizations.ProjectArgs{ +// Name: pulumi.String("My Project"), +// ProjectId: pulumi.String("your-project-id"), +// OrgId: pulumi.String("1234567"), +// Tags: pulumi.StringMap{ +// "1234567/env": pulumi.String("staging"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// // ## Import // // Projects can be imported using the `project_id`, e.g. @@ -144,6 +179,8 @@ type Project struct { ProjectId pulumi.StringOutput `pulumi:"projectId"` // The combination of labels configured directly on the resource and default labels configured on the provider. PulumiLabels pulumi.StringMapOutput `pulumi:"pulumiLabels"` + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + Tags pulumi.StringMapOutput `pulumi:"tags"` } // NewProject registers a new resource with the given unique name, arguments, and options. @@ -219,6 +256,8 @@ type projectState struct { ProjectId *string `pulumi:"projectId"` // The combination of labels configured directly on the resource and default labels configured on the provider. PulumiLabels map[string]string `pulumi:"pulumiLabels"` + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + Tags map[string]string `pulumi:"tags"` } type ProjectState struct { @@ -260,6 +299,8 @@ type ProjectState struct { ProjectId pulumi.StringPtrInput // The combination of labels configured directly on the resource and default labels configured on the provider. PulumiLabels pulumi.StringMapInput + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. 
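+	//
+	// The matching `tags` output on the project data source (added in this change) can be
+	// used to read the bindings back; a minimal editorial sketch with a hypothetical
+	// project id:
+	//
+	// ```go
+	// proj, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{
+	//	ProjectId: pulumi.StringRef("your-project-id"),
+	// }, nil)
+	// if err != nil {
+	//	return err
+	// }
+	// ctx.Export("projectTags", pulumi.ToStringMap(proj.Tags))
+	// ```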
+ Tags pulumi.StringMapInput } func (ProjectState) ElementType() reflect.Type { @@ -299,6 +340,8 @@ type projectArgs struct { OrgId *string `pulumi:"orgId"` // The project ID. Changing this forces a new project to be created. ProjectId *string `pulumi:"projectId"` + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + Tags map[string]string `pulumi:"tags"` } // The set of arguments for constructing a Project resource. @@ -335,6 +378,8 @@ type ProjectArgs struct { OrgId pulumi.StringPtrInput // The project ID. Changing this forces a new project to be created. ProjectId pulumi.StringPtrInput + // A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + Tags pulumi.StringMapInput } func (ProjectArgs) ElementType() reflect.Type { @@ -495,6 +540,11 @@ func (o ProjectOutput) PulumiLabels() pulumi.StringMapOutput { return o.ApplyT(func(v *Project) pulumi.StringMapOutput { return v.PulumiLabels }).(pulumi.StringMapOutput) } +// A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. +func (o ProjectOutput) Tags() pulumi.StringMapOutput { + return o.ApplyT(func(v *Project) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput) +} + type ProjectArrayOutput struct{ *pulumi.OutputState } func (ProjectArrayOutput) ElementType() reflect.Type { diff --git a/sdk/go/gcp/parallelstore/instance.go b/sdk/go/gcp/parallelstore/instance.go index 16b1fda6c2..b03a9d274c 100644 --- a/sdk/go/gcp/parallelstore/instance.go +++ b/sdk/go/gcp/parallelstore/instance.go @@ -116,7 +116,7 @@ type Instance struct { CapacityGib pulumi.StringOutput `pulumi:"capacityGib"` // The time when the instance was created. CreateTime pulumi.StringOutput `pulumi:"createTime"` - // The version of DAOS software running in the instance + // The version of DAOS software running in the instance. DaosVersion pulumi.StringOutput `pulumi:"daosVersion"` // The description of the instance. 2048 characters or less. Description pulumi.StringPtrOutput `pulumi:"description"` @@ -131,9 +131,9 @@ type Instance struct { DirectoryStripeLevel pulumi.StringPtrOutput `pulumi:"directoryStripeLevel"` // All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. EffectiveLabels pulumi.StringMapOutput `pulumi:"effectiveLabels"` - // Immutable. Contains the id of the allocated IP address range associated with the - // private service access connection for example, "test-default" associated - // with IP range 10.0.0.0/29. This field is populated by the service and + // Immutable. Contains the id of the allocated IP address + // range associated with the private service access connection for example, \"test-default\" + // associated with IP range 10.0.0.0/29. 
This field is populated by the service // and contains the value currently used by the service. EffectiveReservedIpRange pulumi.StringOutput `pulumi:"effectiveReservedIpRange"` // Stripe level for files. @@ -154,12 +154,12 @@ type Instance struct { // // *** InstanceId pulumi.StringOutput `pulumi:"instanceId"` - // Cloud Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. Cloud Labels can be used to filter collections of - // resources. They can be used to control how resource metrics are aggregated. - // And they can be used as arguments to policy management rules (e.g. route, - // firewall, load balancing, etc.). + // Cloud Labels are a flexible and lightweight mechanism for + // organizing cloud resources into groups that reflect a customer's organizational + // needs and deployment strategies. Cloud Labels can be used to filter collections + // of resources. They can be used to control how resource metrics are aggregated. + // And they can be used as arguments to policy management rules (e.g. route, firewall, + // load balancing, etc.). // * Label keys must be between 1 and 63 characters long and must conform to // the following regular expression: `a-z{0,62}`. // * Label values must be between 0 and 63 characters long and must conform @@ -170,19 +170,19 @@ type Instance struct { // characters may be allowed in the future. Therefore, you are advised to use // an internal label representation, such as JSON, which doesn't rely upon // specific characters being disallowed. For example, representing labels - // as the string: name + "_" + value would prove problematic if we were to - // allow "_" in a future release. - // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + // as the string: `name + "_" + value` would prove problematic if we were to + // allow `"_"` in a future release. " + // + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. Labels pulumi.StringMapOutput `pulumi:"labels"` // Part of `parent`. See documentation of `projectsId`. Location pulumi.StringOutput `pulumi:"location"` // Identifier. The resource name of the instance, in the format // `projects/{project}/locations/{location}/instances/{instance_id}` Name pulumi.StringOutput `pulumi:"name"` - // Immutable. The name of the Google Compute Engine - // [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - // instance is connected. + // Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + // to which the instance is connected. Network pulumi.StringPtrOutput `pulumi:"network"` // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. @@ -190,10 +190,10 @@ type Instance struct { // The combination of labels configured directly on the resource // and default labels configured on the provider. PulumiLabels pulumi.StringMapOutput `pulumi:"pulumiLabels"` - // Immutable. Contains the id of the allocated IP address range associated with the - // private service access connection for example, "test-default" associated - // with IP range 10.0.0.0/29. 
If no range id is provided all ranges will be - // considered. + // Immutable. Contains the id of the allocated IP address range + // associated with the private service access connection for example, \"test-default\" + // associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + // be considered. ReservedIpRange pulumi.StringPtrOutput `pulumi:"reservedIpRange"` // The instance state. // Possible values: @@ -259,7 +259,7 @@ type instanceState struct { CapacityGib *string `pulumi:"capacityGib"` // The time when the instance was created. CreateTime *string `pulumi:"createTime"` - // The version of DAOS software running in the instance + // The version of DAOS software running in the instance. DaosVersion *string `pulumi:"daosVersion"` // The description of the instance. 2048 characters or less. Description *string `pulumi:"description"` @@ -274,9 +274,9 @@ type instanceState struct { DirectoryStripeLevel *string `pulumi:"directoryStripeLevel"` // All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. EffectiveLabels map[string]string `pulumi:"effectiveLabels"` - // Immutable. Contains the id of the allocated IP address range associated with the - // private service access connection for example, "test-default" associated - // with IP range 10.0.0.0/29. This field is populated by the service and + // Immutable. Contains the id of the allocated IP address + // range associated with the private service access connection for example, \"test-default\" + // associated with IP range 10.0.0.0/29. This field is populated by the service // and contains the value currently used by the service. EffectiveReservedIpRange *string `pulumi:"effectiveReservedIpRange"` // Stripe level for files. @@ -297,12 +297,12 @@ type instanceState struct { // // *** InstanceId *string `pulumi:"instanceId"` - // Cloud Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. Cloud Labels can be used to filter collections of - // resources. They can be used to control how resource metrics are aggregated. - // And they can be used as arguments to policy management rules (e.g. route, - // firewall, load balancing, etc.). + // Cloud Labels are a flexible and lightweight mechanism for + // organizing cloud resources into groups that reflect a customer's organizational + // needs and deployment strategies. Cloud Labels can be used to filter collections + // of resources. They can be used to control how resource metrics are aggregated. + // And they can be used as arguments to policy management rules (e.g. route, firewall, + // load balancing, etc.). // * Label keys must be between 1 and 63 characters long and must conform to // the following regular expression: `a-z{0,62}`. // * Label values must be between 0 and 63 characters long and must conform @@ -313,19 +313,19 @@ type instanceState struct { // characters may be allowed in the future. Therefore, you are advised to use // an internal label representation, such as JSON, which doesn't rely upon // specific characters being disallowed. For example, representing labels - // as the string: name + "_" + value would prove problematic if we were to - // allow "_" in a future release. - // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
- // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + // as the string: `name + "_" + value` would prove problematic if we were to + // allow `"_"` in a future release. " + // + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. Labels map[string]string `pulumi:"labels"` // Part of `parent`. See documentation of `projectsId`. Location *string `pulumi:"location"` // Identifier. The resource name of the instance, in the format // `projects/{project}/locations/{location}/instances/{instance_id}` Name *string `pulumi:"name"` - // Immutable. The name of the Google Compute Engine - // [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - // instance is connected. + // Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + // to which the instance is connected. Network *string `pulumi:"network"` // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. @@ -333,10 +333,10 @@ type instanceState struct { // The combination of labels configured directly on the resource // and default labels configured on the provider. PulumiLabels map[string]string `pulumi:"pulumiLabels"` - // Immutable. Contains the id of the allocated IP address range associated with the - // private service access connection for example, "test-default" associated - // with IP range 10.0.0.0/29. If no range id is provided all ranges will be - // considered. + // Immutable. Contains the id of the allocated IP address range + // associated with the private service access connection for example, \"test-default\" + // associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + // be considered. ReservedIpRange *string `pulumi:"reservedIpRange"` // The instance state. // Possible values: @@ -359,7 +359,7 @@ type InstanceState struct { CapacityGib pulumi.StringPtrInput // The time when the instance was created. CreateTime pulumi.StringPtrInput - // The version of DAOS software running in the instance + // The version of DAOS software running in the instance. DaosVersion pulumi.StringPtrInput // The description of the instance. 2048 characters or less. Description pulumi.StringPtrInput @@ -374,9 +374,9 @@ type InstanceState struct { DirectoryStripeLevel pulumi.StringPtrInput // All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. EffectiveLabels pulumi.StringMapInput - // Immutable. Contains the id of the allocated IP address range associated with the - // private service access connection for example, "test-default" associated - // with IP range 10.0.0.0/29. This field is populated by the service and + // Immutable. Contains the id of the allocated IP address + // range associated with the private service access connection for example, \"test-default\" + // associated with IP range 10.0.0.0/29. This field is populated by the service // and contains the value currently used by the service. EffectiveReservedIpRange pulumi.StringPtrInput // Stripe level for files. 
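// For illustration only (the keys and values below are placeholders, and this is not from the upstream provider docs): a labels map that satisfies the constraints described above, with lowercase keys of at most 63 characters that start with a letter. The fragment is meant to sit inside a `parallelstore.InstanceArgs` literal.
//
// ```go
// Labels: pulumi.StringMap{
//	"environment": pulumi.String("staging"),
//	"team":        pulumi.String("storage"),
// },
// ```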
@@ -397,12 +397,12 @@ type InstanceState struct { // // *** InstanceId pulumi.StringPtrInput - // Cloud Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. Cloud Labels can be used to filter collections of - // resources. They can be used to control how resource metrics are aggregated. - // And they can be used as arguments to policy management rules (e.g. route, - // firewall, load balancing, etc.). + // Cloud Labels are a flexible and lightweight mechanism for + // organizing cloud resources into groups that reflect a customer's organizational + // needs and deployment strategies. Cloud Labels can be used to filter collections + // of resources. They can be used to control how resource metrics are aggregated. + // And they can be used as arguments to policy management rules (e.g. route, firewall, + // load balancing, etc.). // * Label keys must be between 1 and 63 characters long and must conform to // the following regular expression: `a-z{0,62}`. // * Label values must be between 0 and 63 characters long and must conform @@ -413,19 +413,19 @@ type InstanceState struct { // characters may be allowed in the future. Therefore, you are advised to use // an internal label representation, such as JSON, which doesn't rely upon // specific characters being disallowed. For example, representing labels - // as the string: name + "_" + value would prove problematic if we were to - // allow "_" in a future release. - // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + // as the string: `name + "_" + value` would prove problematic if we were to + // allow `"_"` in a future release. " + // + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. Labels pulumi.StringMapInput // Part of `parent`. See documentation of `projectsId`. Location pulumi.StringPtrInput // Identifier. The resource name of the instance, in the format // `projects/{project}/locations/{location}/instances/{instance_id}` Name pulumi.StringPtrInput - // Immutable. The name of the Google Compute Engine - // [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - // instance is connected. + // Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + // to which the instance is connected. Network pulumi.StringPtrInput // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. @@ -433,10 +433,10 @@ type InstanceState struct { // The combination of labels configured directly on the resource // and default labels configured on the provider. PulumiLabels pulumi.StringMapInput - // Immutable. Contains the id of the allocated IP address range associated with the - // private service access connection for example, "test-default" associated - // with IP range 10.0.0.0/29. If no range id is provided all ranges will be - // considered. + // Immutable. Contains the id of the allocated IP address range + // associated with the private service access connection for example, \"test-default\" + // associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + // be considered. 
ReservedIpRange pulumi.StringPtrInput // The instance state. // Possible values: @@ -487,12 +487,12 @@ type instanceArgs struct { // // *** InstanceId string `pulumi:"instanceId"` - // Cloud Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. Cloud Labels can be used to filter collections of - // resources. They can be used to control how resource metrics are aggregated. - // And they can be used as arguments to policy management rules (e.g. route, - // firewall, load balancing, etc.). + // Cloud Labels are a flexible and lightweight mechanism for + // organizing cloud resources into groups that reflect a customer's organizational + // needs and deployment strategies. Cloud Labels can be used to filter collections + // of resources. They can be used to control how resource metrics are aggregated. + // And they can be used as arguments to policy management rules (e.g. route, firewall, + // load balancing, etc.). // * Label keys must be between 1 and 63 characters long and must conform to // the following regular expression: `a-z{0,62}`. // * Label values must be between 0 and 63 characters long and must conform @@ -503,24 +503,24 @@ type instanceArgs struct { // characters may be allowed in the future. Therefore, you are advised to use // an internal label representation, such as JSON, which doesn't rely upon // specific characters being disallowed. For example, representing labels - // as the string: name + "_" + value would prove problematic if we were to - // allow "_" in a future release. - // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + // as the string: `name + "_" + value` would prove problematic if we were to + // allow `"_"` in a future release. " + // + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. Labels map[string]string `pulumi:"labels"` // Part of `parent`. See documentation of `projectsId`. Location string `pulumi:"location"` - // Immutable. The name of the Google Compute Engine - // [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - // instance is connected. + // Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + // to which the instance is connected. Network *string `pulumi:"network"` // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project *string `pulumi:"project"` - // Immutable. Contains the id of the allocated IP address range associated with the - // private service access connection for example, "test-default" associated - // with IP range 10.0.0.0/29. If no range id is provided all ranges will be - // considered. + // Immutable. Contains the id of the allocated IP address range + // associated with the private service access connection for example, \"test-default\" + // associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + // be considered. 
ReservedIpRange *string `pulumi:"reservedIpRange"` } @@ -557,12 +557,12 @@ type InstanceArgs struct { // // *** InstanceId pulumi.StringInput - // Cloud Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. Cloud Labels can be used to filter collections of - // resources. They can be used to control how resource metrics are aggregated. - // And they can be used as arguments to policy management rules (e.g. route, - // firewall, load balancing, etc.). + // Cloud Labels are a flexible and lightweight mechanism for + // organizing cloud resources into groups that reflect a customer's organizational + // needs and deployment strategies. Cloud Labels can be used to filter collections + // of resources. They can be used to control how resource metrics are aggregated. + // And they can be used as arguments to policy management rules (e.g. route, firewall, + // load balancing, etc.). // * Label keys must be between 1 and 63 characters long and must conform to // the following regular expression: `a-z{0,62}`. // * Label values must be between 0 and 63 characters long and must conform @@ -573,24 +573,24 @@ type InstanceArgs struct { // characters may be allowed in the future. Therefore, you are advised to use // an internal label representation, such as JSON, which doesn't rely upon // specific characters being disallowed. For example, representing labels - // as the string: name + "_" + value would prove problematic if we were to - // allow "_" in a future release. - // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - // Please refer to the field `effectiveLabels` for all of the labels present on the resource. + // as the string: `name + "_" + value` would prove problematic if we were to + // allow `"_"` in a future release. " + // + // **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + // Please refer to the field `effectiveLabels` for all of the labels present on the resource. Labels pulumi.StringMapInput // Part of `parent`. See documentation of `projectsId`. Location pulumi.StringInput - // Immutable. The name of the Google Compute Engine - // [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - // instance is connected. + // Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + // to which the instance is connected. Network pulumi.StringPtrInput // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project pulumi.StringPtrInput - // Immutable. Contains the id of the allocated IP address range associated with the - // private service access connection for example, "test-default" associated - // with IP range 10.0.0.0/29. If no range id is provided all ranges will be - // considered. + // Immutable. Contains the id of the allocated IP address range + // associated with the private service access connection for example, \"test-default\" + // associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + // be considered. 
ReservedIpRange pulumi.StringPtrInput } @@ -697,7 +697,7 @@ func (o InstanceOutput) CreateTime() pulumi.StringOutput { return o.ApplyT(func(v *Instance) pulumi.StringOutput { return v.CreateTime }).(pulumi.StringOutput) } -// The version of DAOS software running in the instance +// The version of DAOS software running in the instance. func (o InstanceOutput) DaosVersion() pulumi.StringOutput { return o.ApplyT(func(v *Instance) pulumi.StringOutput { return v.DaosVersion }).(pulumi.StringOutput) } @@ -724,9 +724,9 @@ func (o InstanceOutput) EffectiveLabels() pulumi.StringMapOutput { return o.ApplyT(func(v *Instance) pulumi.StringMapOutput { return v.EffectiveLabels }).(pulumi.StringMapOutput) } -// Immutable. Contains the id of the allocated IP address range associated with the -// private service access connection for example, "test-default" associated -// with IP range 10.0.0.0/29. This field is populated by the service and +// Immutable. Contains the id of the allocated IP address +// range associated with the private service access connection for example, \"test-default\" +// associated with IP range 10.0.0.0/29. This field is populated by the service // and contains the value currently used by the service. func (o InstanceOutput) EffectiveReservedIpRange() pulumi.StringOutput { return o.ApplyT(func(v *Instance) pulumi.StringOutput { return v.EffectiveReservedIpRange }).(pulumi.StringOutput) @@ -756,12 +756,12 @@ func (o InstanceOutput) InstanceId() pulumi.StringOutput { return o.ApplyT(func(v *Instance) pulumi.StringOutput { return v.InstanceId }).(pulumi.StringOutput) } -// Cloud Labels are a flexible and lightweight mechanism for organizing cloud -// resources into groups that reflect a customer's organizational needs and -// deployment strategies. Cloud Labels can be used to filter collections of -// resources. They can be used to control how resource metrics are aggregated. -// And they can be used as arguments to policy management rules (e.g. route, -// firewall, load balancing, etc.). +// Cloud Labels are a flexible and lightweight mechanism for +// organizing cloud resources into groups that reflect a customer's organizational +// needs and deployment strategies. Cloud Labels can be used to filter collections +// of resources. They can be used to control how resource metrics are aggregated. +// And they can be used as arguments to policy management rules (e.g. route, firewall, +// load balancing, etc.). // - Label keys must be between 1 and 63 characters long and must conform to // the following regular expression: `a-z{0,62}`. // - Label values must be between 0 and 63 characters long and must conform @@ -772,10 +772,11 @@ func (o InstanceOutput) InstanceId() pulumi.StringOutput { // characters may be allowed in the future. Therefore, you are advised to use // an internal label representation, such as JSON, which doesn't rely upon // specific characters being disallowed. For example, representing labels -// as the string: name + "_" + value would prove problematic if we were to -// allow "_" in a future release. -// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. -// Please refer to the field `effectiveLabels` for all of the labels present on the resource. +// as the string: `name + "_" + value` would prove problematic if we were to +// allow `"_"` in a future release. " +// +// **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+// Please refer to the field `effectiveLabels` for all of the labels present on the resource. func (o InstanceOutput) Labels() pulumi.StringMapOutput { return o.ApplyT(func(v *Instance) pulumi.StringMapOutput { return v.Labels }).(pulumi.StringMapOutput) } @@ -791,9 +792,8 @@ func (o InstanceOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v *Instance) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) } -// Immutable. The name of the Google Compute Engine -// [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the -// instance is connected. +// Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) +// to which the instance is connected. func (o InstanceOutput) Network() pulumi.StringPtrOutput { return o.ApplyT(func(v *Instance) pulumi.StringPtrOutput { return v.Network }).(pulumi.StringPtrOutput) } @@ -810,10 +810,10 @@ func (o InstanceOutput) PulumiLabels() pulumi.StringMapOutput { return o.ApplyT(func(v *Instance) pulumi.StringMapOutput { return v.PulumiLabels }).(pulumi.StringMapOutput) } -// Immutable. Contains the id of the allocated IP address range associated with the -// private service access connection for example, "test-default" associated -// with IP range 10.0.0.0/29. If no range id is provided all ranges will be -// considered. +// Immutable. Contains the id of the allocated IP address range +// associated with the private service access connection for example, \"test-default\" +// associated with IP range 10.0.0.0/29. If no range id is provided all ranges will +// be considered. func (o InstanceOutput) ReservedIpRange() pulumi.StringPtrOutput { return o.ApplyT(func(v *Instance) pulumi.StringPtrOutput { return v.ReservedIpRange }).(pulumi.StringPtrOutput) } diff --git a/sdk/go/gcp/projects/iamMemberRemove.go b/sdk/go/gcp/projects/iamMemberRemove.go index fd45b5fb79..64ba079e78 100644 --- a/sdk/go/gcp/projects/iamMemberRemove.go +++ b/sdk/go/gcp/projects/iamMemberRemove.go @@ -31,6 +31,41 @@ import ( // [the official documentation](https://cloud.google.com/iam/docs/granting-changing-revoking-access) // and // [API reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). +// +// ## Example Usage +// +// ```go +// package main +// +// import ( +// +// "fmt" +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations" +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/projects" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// targetProjectGoogleProject, err := organizations.LookupProject(ctx, nil, nil) +// if err != nil { +// return err +// } +// _, err = projects.NewIamMemberRemove(ctx, "foo", &projects.IamMemberRemoveArgs{ +// Role: pulumi.String("roles/editor"), +// Project: pulumi.Any(targetProjectGoogleProject.ProjectId), +// Member: pulumi.String(fmt.Sprintf("serviceAccount:%v-compute@developer.gserviceaccount.com", targetProjectGoogleProject.Number)), +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` type IamMemberRemove struct { pulumi.CustomResourceState diff --git a/sdk/go/gcp/projects/usageExportBucket.go b/sdk/go/gcp/projects/usageExportBucket.go index 68ebf9398d..085324da6f 100644 --- a/sdk/go/gcp/projects/usageExportBucket.go +++ b/sdk/go/gcp/projects/usageExportBucket.go @@ -24,6 +24,10 @@ import ( // // > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account.
// +// > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `autoCreateNetwork` to false, when possible. +// +// > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. +// // To get more information about projects, see: // // * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -93,6 +97,37 @@ import ( // // ``` // +// # To create a project with a tag +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := organizations.NewProject(ctx, "my_project", &organizations.ProjectArgs{ +// Name: pulumi.String("My Project"), +// ProjectId: pulumi.String("your-project-id"), +// OrgId: pulumi.String("1234567"), +// Tags: pulumi.StringMap{ +// "1234567/env": pulumi.String("staging"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// // ## Import // // Projects can be imported using the `project_id`, e.g. diff --git a/sdk/go/gcp/pubsub/pulumiTypes.go b/sdk/go/gcp/pubsub/pulumiTypes.go index 6207dd588a..ce2ce3a5af 100644 --- a/sdk/go/gcp/pubsub/pulumiTypes.go +++ b/sdk/go/gcp/pubsub/pulumiTypes.go @@ -1388,6 +1388,8 @@ type SubscriptionCloudStorageConfig struct { // May not exceed the subscription's acknowledgement deadline. // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". MaxDuration *string `pulumi:"maxDuration"` + // The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + MaxMessages *int `pulumi:"maxMessages"` // The service account to use to write to Cloud Storage. If not specified, the Pub/Sub // [service agent](https://cloud.google.com/iam/docs/service-agents), // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -1427,6 +1429,8 @@ type SubscriptionCloudStorageConfigArgs struct { // May not exceed the subscription's acknowledgement deadline. // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". MaxDuration pulumi.StringPtrInput `pulumi:"maxDuration"` + // The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + MaxMessages pulumi.IntPtrInput `pulumi:"maxMessages"` // The service account to use to write to Cloud Storage. If not specified, the Pub/Sub // [service agent](https://cloud.google.com/iam/docs/service-agents), // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -1552,6 +1556,11 @@ func (o SubscriptionCloudStorageConfigOutput) MaxDuration() pulumi.StringPtrOutp return o.ApplyT(func(v SubscriptionCloudStorageConfig) *string { return v.MaxDuration }).(pulumi.StringPtrOutput) } +// The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. +func (o SubscriptionCloudStorageConfigOutput) MaxMessages() pulumi.IntPtrOutput { + return o.ApplyT(func(v SubscriptionCloudStorageConfig) *int { return v.MaxMessages }).(pulumi.IntPtrOutput) +} + // The service account to use to write to Cloud Storage. 
If not specified, the Pub/Sub // [service agent](https://cloud.google.com/iam/docs/service-agents), // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -1663,6 +1672,16 @@ func (o SubscriptionCloudStorageConfigPtrOutput) MaxDuration() pulumi.StringPtrO }).(pulumi.StringPtrOutput) } +// The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. +func (o SubscriptionCloudStorageConfigPtrOutput) MaxMessages() pulumi.IntPtrOutput { + return o.ApplyT(func(v *SubscriptionCloudStorageConfig) *int { + if v == nil { + return nil + } + return v.MaxMessages + }).(pulumi.IntPtrOutput) +} + // The service account to use to write to Cloud Storage. If not specified, the Pub/Sub // [service agent](https://cloud.google.com/iam/docs/service-agents), // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -1687,6 +1706,8 @@ func (o SubscriptionCloudStorageConfigPtrOutput) State() pulumi.StringPtrOutput } type SubscriptionCloudStorageConfigAvroConfig struct { + // When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + UseTopicSchema *bool `pulumi:"useTopicSchema"` // When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. WriteMetadata *bool `pulumi:"writeMetadata"` } @@ -1703,6 +1724,8 @@ type SubscriptionCloudStorageConfigAvroConfigInput interface { } type SubscriptionCloudStorageConfigAvroConfigArgs struct { + // When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + UseTopicSchema pulumi.BoolPtrInput `pulumi:"useTopicSchema"` // When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. WriteMetadata pulumi.BoolPtrInput `pulumi:"writeMetadata"` } @@ -1784,6 +1807,11 @@ func (o SubscriptionCloudStorageConfigAvroConfigOutput) ToSubscriptionCloudStora }).(SubscriptionCloudStorageConfigAvroConfigPtrOutput) } +// When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. +func (o SubscriptionCloudStorageConfigAvroConfigOutput) UseTopicSchema() pulumi.BoolPtrOutput { + return o.ApplyT(func(v SubscriptionCloudStorageConfigAvroConfig) *bool { return v.UseTopicSchema }).(pulumi.BoolPtrOutput) +} + // When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. func (o SubscriptionCloudStorageConfigAvroConfigOutput) WriteMetadata() pulumi.BoolPtrOutput { return o.ApplyT(func(v SubscriptionCloudStorageConfigAvroConfig) *bool { return v.WriteMetadata }).(pulumi.BoolPtrOutput) @@ -1813,6 +1841,16 @@ func (o SubscriptionCloudStorageConfigAvroConfigPtrOutput) Elem() SubscriptionCl }).(SubscriptionCloudStorageConfigAvroConfigOutput) } +// When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. +func (o SubscriptionCloudStorageConfigAvroConfigPtrOutput) UseTopicSchema() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *SubscriptionCloudStorageConfigAvroConfig) *bool { + if v == nil { + return nil + } + return v.UseTopicSchema + }).(pulumi.BoolPtrOutput) +} + // When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. 
func (o SubscriptionCloudStorageConfigAvroConfigPtrOutput) WriteMetadata() pulumi.BoolPtrOutput { return o.ApplyT(func(v *SubscriptionCloudStorageConfigAvroConfig) *bool { @@ -4506,6 +4544,8 @@ type GetSubscriptionCloudStorageConfig struct { // May not exceed the subscription's acknowledgement deadline. // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". MaxDuration string `pulumi:"maxDuration"` + // The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + MaxMessages int `pulumi:"maxMessages"` // The service account to use to write to Cloud Storage. If not specified, the Pub/Sub // [service agent](https://cloud.google.com/iam/docs/service-agents), // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -4543,6 +4583,8 @@ type GetSubscriptionCloudStorageConfigArgs struct { // May not exceed the subscription's acknowledgement deadline. // A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". MaxDuration pulumi.StringInput `pulumi:"maxDuration"` + // The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + MaxMessages pulumi.IntInput `pulumi:"maxMessages"` // The service account to use to write to Cloud Storage. If not specified, the Pub/Sub // [service agent](https://cloud.google.com/iam/docs/service-agents), // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -4642,6 +4684,11 @@ func (o GetSubscriptionCloudStorageConfigOutput) MaxDuration() pulumi.StringOutp return o.ApplyT(func(v GetSubscriptionCloudStorageConfig) string { return v.MaxDuration }).(pulumi.StringOutput) } +// The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. +func (o GetSubscriptionCloudStorageConfigOutput) MaxMessages() pulumi.IntOutput { + return o.ApplyT(func(v GetSubscriptionCloudStorageConfig) int { return v.MaxMessages }).(pulumi.IntOutput) +} + // The service account to use to write to Cloud Storage. If not specified, the Pub/Sub // [service agent](https://cloud.google.com/iam/docs/service-agents), // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -4675,6 +4722,8 @@ func (o GetSubscriptionCloudStorageConfigArrayOutput) Index(i pulumi.IntInput) G } type GetSubscriptionCloudStorageConfigAvroConfig struct { + // When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + UseTopicSchema bool `pulumi:"useTopicSchema"` // When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. WriteMetadata bool `pulumi:"writeMetadata"` } @@ -4691,6 +4740,8 @@ type GetSubscriptionCloudStorageConfigAvroConfigInput interface { } type GetSubscriptionCloudStorageConfigAvroConfigArgs struct { + // When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + UseTopicSchema pulumi.BoolInput `pulumi:"useTopicSchema"` // When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. WriteMetadata pulumi.BoolInput `pulumi:"writeMetadata"` } @@ -4746,6 +4797,11 @@ func (o GetSubscriptionCloudStorageConfigAvroConfigOutput) ToGetSubscriptionClou return o } +// When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. 
+func (o GetSubscriptionCloudStorageConfigAvroConfigOutput) UseTopicSchema() pulumi.BoolOutput { + return o.ApplyT(func(v GetSubscriptionCloudStorageConfigAvroConfig) bool { return v.UseTopicSchema }).(pulumi.BoolOutput) +} + // When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. func (o GetSubscriptionCloudStorageConfigAvroConfigOutput) WriteMetadata() pulumi.BoolOutput { return o.ApplyT(func(v GetSubscriptionCloudStorageConfigAvroConfig) bool { return v.WriteMetadata }).(pulumi.BoolOutput) diff --git a/sdk/go/gcp/pubsub/subscription.go b/sdk/go/gcp/pubsub/subscription.go index e11351f87c..59cbdf0d86 100644 --- a/sdk/go/gcp/pubsub/subscription.go +++ b/sdk/go/gcp/pubsub/subscription.go @@ -503,6 +503,7 @@ import ( // FilenameDatetimeFormat: pulumi.String("YYYY-MM-DD/hh_mm_ssZ"), // MaxBytes: pulumi.Int(1000), // MaxDuration: pulumi.String("300s"), +// MaxMessages: pulumi.Int(1000), // }, // }, pulumi.DependsOn([]pulumi.Resource{ // example, @@ -570,8 +571,10 @@ import ( // FilenameDatetimeFormat: pulumi.String("YYYY-MM-DD/hh_mm_ssZ"), // MaxBytes: pulumi.Int(1000), // MaxDuration: pulumi.String("300s"), +// MaxMessages: pulumi.Int(1000), // AvroConfig: &pubsub.SubscriptionCloudStorageConfigAvroConfigArgs{ -// WriteMetadata: pulumi.Bool(true), +// WriteMetadata: pulumi.Bool(true), +// UseTopicSchema: pulumi.Bool(true), // }, // }, // }, pulumi.DependsOn([]pulumi.Resource{ diff --git a/sdk/go/gcp/redis/cluster.go b/sdk/go/gcp/redis/cluster.go index dcfb16d43d..06dad8a24c 100644 --- a/sdk/go/gcp/redis/cluster.go +++ b/sdk/go/gcp/redis/cluster.go @@ -89,6 +89,19 @@ import ( // ZoneDistributionConfig: &redis.ClusterZoneDistributionConfigArgs{ // Mode: pulumi.String("MULTI_ZONE"), // }, +// MaintenancePolicy: &redis.ClusterMaintenancePolicyArgs{ +// WeeklyMaintenanceWindows: redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowArray{ +// &redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs{ +// Day: pulumi.String("MONDAY"), +// StartTime: &redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs{ +// Hours: pulumi.Int(1), +// Minutes: pulumi.Int(0), +// Seconds: pulumi.Int(0), +// Nanos: pulumi.Int(0), +// }, +// }, +// }, +// }, // }, pulumi.DependsOn([]pulumi.Resource{ // _default, // })) @@ -160,6 +173,19 @@ import ( // Mode: pulumi.String("SINGLE_ZONE"), // Zone: pulumi.String("us-central1-f"), // }, +// MaintenancePolicy: &redis.ClusterMaintenancePolicyArgs{ +// WeeklyMaintenanceWindows: redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowArray{ +// &redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs{ +// Day: pulumi.String("MONDAY"), +// StartTime: &redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs{ +// Hours: pulumi.Int(1), +// Minutes: pulumi.Int(0), +// Seconds: pulumi.Int(0), +// Nanos: pulumi.Int(0), +// }, +// }, +// }, +// }, // DeletionProtectionEnabled: pulumi.Bool(true), // }, pulumi.DependsOn([]pulumi.Resource{ // _default, @@ -221,6 +247,11 @@ type Cluster struct { // Currently only one endpoint is supported. // Structure is documented below. DiscoveryEndpoints ClusterDiscoveryEndpointArrayOutput `pulumi:"discoveryEndpoints"` + // Maintenance policy for a cluster + MaintenancePolicy ClusterMaintenancePolicyPtrOutput `pulumi:"maintenancePolicy"` + // Upcoming maintenance schedule. + // Structure is documented below. 
+ MaintenanceSchedules ClusterMaintenanceScheduleArrayOutput `pulumi:"maintenanceSchedules"` // Unique name of the resource in this scope including project and location using the form: // projects/{projectId}/locations/{locationId}/clusters/{clusterId} Name pulumi.StringOutput `pulumi:"name"` @@ -317,6 +348,11 @@ type clusterState struct { // Currently only one endpoint is supported. // Structure is documented below. DiscoveryEndpoints []ClusterDiscoveryEndpoint `pulumi:"discoveryEndpoints"` + // Maintenance policy for a cluster + MaintenancePolicy *ClusterMaintenancePolicy `pulumi:"maintenancePolicy"` + // Upcoming maintenance schedule. + // Structure is documented below. + MaintenanceSchedules []ClusterMaintenanceSchedule `pulumi:"maintenanceSchedules"` // Unique name of the resource in this scope including project and location using the form: // projects/{projectId}/locations/{locationId}/clusters/{clusterId} Name *string `pulumi:"name"` @@ -378,6 +414,11 @@ type ClusterState struct { // Currently only one endpoint is supported. // Structure is documented below. DiscoveryEndpoints ClusterDiscoveryEndpointArrayInput + // Maintenance policy for a cluster + MaintenancePolicy ClusterMaintenancePolicyPtrInput + // Upcoming maintenance schedule. + // Structure is documented below. + MaintenanceSchedules ClusterMaintenanceScheduleArrayInput // Unique name of the resource in this scope including project and location using the form: // projects/{projectId}/locations/{locationId}/clusters/{clusterId} Name pulumi.StringPtrInput @@ -434,6 +475,8 @@ type clusterArgs struct { // Optional. Indicates if the cluster is deletion protected or not. If the value if set to true, any delete cluster // operation will fail. Default value is true. DeletionProtectionEnabled *bool `pulumi:"deletionProtectionEnabled"` + // Maintenance policy for a cluster + MaintenancePolicy *ClusterMaintenancePolicy `pulumi:"maintenancePolicy"` // Unique name of the resource in this scope including project and location using the form: // projects/{projectId}/locations/{locationId}/clusters/{clusterId} Name *string `pulumi:"name"` @@ -473,6 +516,8 @@ type ClusterArgs struct { // Optional. Indicates if the cluster is deletion protected or not. If the value if set to true, any delete cluster // operation will fail. Default value is true. DeletionProtectionEnabled pulumi.BoolPtrInput + // Maintenance policy for a cluster + MaintenancePolicy ClusterMaintenancePolicyPtrInput // Unique name of the resource in this scope including project and location using the form: // projects/{projectId}/locations/{locationId}/clusters/{clusterId} Name pulumi.StringPtrInput @@ -618,6 +663,17 @@ func (o ClusterOutput) DiscoveryEndpoints() ClusterDiscoveryEndpointArrayOutput return o.ApplyT(func(v *Cluster) ClusterDiscoveryEndpointArrayOutput { return v.DiscoveryEndpoints }).(ClusterDiscoveryEndpointArrayOutput) } +// Maintenance policy for a cluster +func (o ClusterOutput) MaintenancePolicy() ClusterMaintenancePolicyPtrOutput { + return o.ApplyT(func(v *Cluster) ClusterMaintenancePolicyPtrOutput { return v.MaintenancePolicy }).(ClusterMaintenancePolicyPtrOutput) +} + +// Upcoming maintenance schedule. +// Structure is documented below. 
+func (o ClusterOutput) MaintenanceSchedules() ClusterMaintenanceScheduleArrayOutput { + return o.ApplyT(func(v *Cluster) ClusterMaintenanceScheduleArrayOutput { return v.MaintenanceSchedules }).(ClusterMaintenanceScheduleArrayOutput) +} + // Unique name of the resource in this scope including project and location using the form: // projects/{projectId}/locations/{locationId}/clusters/{clusterId} func (o ClusterOutput) Name() pulumi.StringOutput { diff --git a/sdk/go/gcp/redis/pulumiTypes.go b/sdk/go/gcp/redis/pulumiTypes.go index f6f8e020f9..24c4f83401 100644 --- a/sdk/go/gcp/redis/pulumiTypes.go +++ b/sdk/go/gcp/redis/pulumiTypes.go @@ -271,6 +271,608 @@ func (o ClusterDiscoveryEndpointPscConfigPtrOutput) Network() pulumi.StringPtrOu }).(pulumi.StringPtrOutput) } +type ClusterMaintenancePolicy struct { + // (Output) + // Output only. The time when the policy was created. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + CreateTime *string `pulumi:"createTime"` + // (Output) + // Output only. The time when the policy was last updated. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + UpdateTime *string `pulumi:"updateTime"` + // Optional. Maintenance window that is applied to resources covered by this policy. + // Minimum 1. For the current version, the maximum number + // of weeklyWindow is expected to be one. + // Structure is documented below. + WeeklyMaintenanceWindows []ClusterMaintenancePolicyWeeklyMaintenanceWindow `pulumi:"weeklyMaintenanceWindows"` +} + +// ClusterMaintenancePolicyInput is an input type that accepts ClusterMaintenancePolicyArgs and ClusterMaintenancePolicyOutput values. +// You can construct a concrete instance of `ClusterMaintenancePolicyInput` via: +// +// ClusterMaintenancePolicyArgs{...} +type ClusterMaintenancePolicyInput interface { + pulumi.Input + + ToClusterMaintenancePolicyOutput() ClusterMaintenancePolicyOutput + ToClusterMaintenancePolicyOutputWithContext(context.Context) ClusterMaintenancePolicyOutput +} + +type ClusterMaintenancePolicyArgs struct { + // (Output) + // Output only. The time when the policy was created. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + CreateTime pulumi.StringPtrInput `pulumi:"createTime"` + // (Output) + // Output only. The time when the policy was last updated. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + UpdateTime pulumi.StringPtrInput `pulumi:"updateTime"` + // Optional. Maintenance window that is applied to resources covered by this policy. + // Minimum 1. For the current version, the maximum number + // of weeklyWindow is expected to be one. + // Structure is documented below. 
+ WeeklyMaintenanceWindows ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayInput `pulumi:"weeklyMaintenanceWindows"` +} + +func (ClusterMaintenancePolicyArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterMaintenancePolicy)(nil)).Elem() +} + +func (i ClusterMaintenancePolicyArgs) ToClusterMaintenancePolicyOutput() ClusterMaintenancePolicyOutput { + return i.ToClusterMaintenancePolicyOutputWithContext(context.Background()) +} + +func (i ClusterMaintenancePolicyArgs) ToClusterMaintenancePolicyOutputWithContext(ctx context.Context) ClusterMaintenancePolicyOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterMaintenancePolicyOutput) +} + +func (i ClusterMaintenancePolicyArgs) ToClusterMaintenancePolicyPtrOutput() ClusterMaintenancePolicyPtrOutput { + return i.ToClusterMaintenancePolicyPtrOutputWithContext(context.Background()) +} + +func (i ClusterMaintenancePolicyArgs) ToClusterMaintenancePolicyPtrOutputWithContext(ctx context.Context) ClusterMaintenancePolicyPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterMaintenancePolicyOutput).ToClusterMaintenancePolicyPtrOutputWithContext(ctx) +} + +// ClusterMaintenancePolicyPtrInput is an input type that accepts ClusterMaintenancePolicyArgs, ClusterMaintenancePolicyPtr and ClusterMaintenancePolicyPtrOutput values. +// You can construct a concrete instance of `ClusterMaintenancePolicyPtrInput` via: +// +// ClusterMaintenancePolicyArgs{...} +// +// or: +// +// nil +type ClusterMaintenancePolicyPtrInput interface { + pulumi.Input + + ToClusterMaintenancePolicyPtrOutput() ClusterMaintenancePolicyPtrOutput + ToClusterMaintenancePolicyPtrOutputWithContext(context.Context) ClusterMaintenancePolicyPtrOutput +} + +type clusterMaintenancePolicyPtrType ClusterMaintenancePolicyArgs + +func ClusterMaintenancePolicyPtr(v *ClusterMaintenancePolicyArgs) ClusterMaintenancePolicyPtrInput { + return (*clusterMaintenancePolicyPtrType)(v) +} + +func (*clusterMaintenancePolicyPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**ClusterMaintenancePolicy)(nil)).Elem() +} + +func (i *clusterMaintenancePolicyPtrType) ToClusterMaintenancePolicyPtrOutput() ClusterMaintenancePolicyPtrOutput { + return i.ToClusterMaintenancePolicyPtrOutputWithContext(context.Background()) +} + +func (i *clusterMaintenancePolicyPtrType) ToClusterMaintenancePolicyPtrOutputWithContext(ctx context.Context) ClusterMaintenancePolicyPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterMaintenancePolicyPtrOutput) +} + +type ClusterMaintenancePolicyOutput struct{ *pulumi.OutputState } + +func (ClusterMaintenancePolicyOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterMaintenancePolicy)(nil)).Elem() +} + +func (o ClusterMaintenancePolicyOutput) ToClusterMaintenancePolicyOutput() ClusterMaintenancePolicyOutput { + return o +} + +func (o ClusterMaintenancePolicyOutput) ToClusterMaintenancePolicyOutputWithContext(ctx context.Context) ClusterMaintenancePolicyOutput { + return o +} + +func (o ClusterMaintenancePolicyOutput) ToClusterMaintenancePolicyPtrOutput() ClusterMaintenancePolicyPtrOutput { + return o.ToClusterMaintenancePolicyPtrOutputWithContext(context.Background()) +} + +func (o ClusterMaintenancePolicyOutput) ToClusterMaintenancePolicyPtrOutputWithContext(ctx context.Context) ClusterMaintenancePolicyPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v ClusterMaintenancePolicy) *ClusterMaintenancePolicy { + return &v + }).(ClusterMaintenancePolicyPtrOutput) +} + +// (Output) +// Output only. 
The time when the policy was created. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +// resolution and up to nine fractional digits. +func (o ClusterMaintenancePolicyOutput) CreateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterMaintenancePolicy) *string { return v.CreateTime }).(pulumi.StringPtrOutput) +} + +// (Output) +// Output only. The time when the policy was last updated. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +// resolution and up to nine fractional digits. +func (o ClusterMaintenancePolicyOutput) UpdateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterMaintenancePolicy) *string { return v.UpdateTime }).(pulumi.StringPtrOutput) +} + +// Optional. Maintenance window that is applied to resources covered by this policy. +// Minimum 1. For the current version, the maximum number +// of weeklyWindow is expected to be one. +// Structure is documented below. +func (o ClusterMaintenancePolicyOutput) WeeklyMaintenanceWindows() ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput { + return o.ApplyT(func(v ClusterMaintenancePolicy) []ClusterMaintenancePolicyWeeklyMaintenanceWindow { + return v.WeeklyMaintenanceWindows + }).(ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput) +} + +type ClusterMaintenancePolicyPtrOutput struct{ *pulumi.OutputState } + +func (ClusterMaintenancePolicyPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ClusterMaintenancePolicy)(nil)).Elem() +} + +func (o ClusterMaintenancePolicyPtrOutput) ToClusterMaintenancePolicyPtrOutput() ClusterMaintenancePolicyPtrOutput { + return o +} + +func (o ClusterMaintenancePolicyPtrOutput) ToClusterMaintenancePolicyPtrOutputWithContext(ctx context.Context) ClusterMaintenancePolicyPtrOutput { + return o +} + +func (o ClusterMaintenancePolicyPtrOutput) Elem() ClusterMaintenancePolicyOutput { + return o.ApplyT(func(v *ClusterMaintenancePolicy) ClusterMaintenancePolicy { + if v != nil { + return *v + } + var ret ClusterMaintenancePolicy + return ret + }).(ClusterMaintenancePolicyOutput) +} + +// (Output) +// Output only. The time when the policy was created. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +// resolution and up to nine fractional digits. +func (o ClusterMaintenancePolicyPtrOutput) CreateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ClusterMaintenancePolicy) *string { + if v == nil { + return nil + } + return v.CreateTime + }).(pulumi.StringPtrOutput) +} + +// (Output) +// Output only. The time when the policy was last updated. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +// resolution and up to nine fractional digits. +func (o ClusterMaintenancePolicyPtrOutput) UpdateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ClusterMaintenancePolicy) *string { + if v == nil { + return nil + } + return v.UpdateTime + }).(pulumi.StringPtrOutput) +} + +// Optional. Maintenance window that is applied to resources covered by this policy. +// Minimum 1. For the current version, the maximum number +// of weeklyWindow is expected to be one. +// Structure is documented below. 
+func (o ClusterMaintenancePolicyPtrOutput) WeeklyMaintenanceWindows() ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput { + return o.ApplyT(func(v *ClusterMaintenancePolicy) []ClusterMaintenancePolicyWeeklyMaintenanceWindow { + if v == nil { + return nil + } + return v.WeeklyMaintenanceWindows + }).(ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput) +} + +type ClusterMaintenancePolicyWeeklyMaintenanceWindow struct { + // Required. The day of week that maintenance updates occur. + // - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + // - MONDAY: Monday + // - TUESDAY: Tuesday + // - WEDNESDAY: Wednesday + // - THURSDAY: Thursday + // - FRIDAY: Friday + // - SATURDAY: Saturday + // - SUNDAY: Sunday + // Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + Day string `pulumi:"day"` + // (Output) + // Output only. Duration of the maintenance window. + // The current window is fixed at 1 hour. + // A duration in seconds with up to nine fractional digits, + // terminated by 's'. Example: "3.5s". + Duration *string `pulumi:"duration"` + // Required. Start time of the window in UTC time. + // Structure is documented below. + StartTime ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime `pulumi:"startTime"` +} + +// ClusterMaintenancePolicyWeeklyMaintenanceWindowInput is an input type that accepts ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs and ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput values. +// You can construct a concrete instance of `ClusterMaintenancePolicyWeeklyMaintenanceWindowInput` via: +// +// ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs{...} +type ClusterMaintenancePolicyWeeklyMaintenanceWindowInput interface { + pulumi.Input + + ToClusterMaintenancePolicyWeeklyMaintenanceWindowOutput() ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput + ToClusterMaintenancePolicyWeeklyMaintenanceWindowOutputWithContext(context.Context) ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput +} + +type ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs struct { + // Required. The day of week that maintenance updates occur. + // - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + // - MONDAY: Monday + // - TUESDAY: Tuesday + // - WEDNESDAY: Wednesday + // - THURSDAY: Thursday + // - FRIDAY: Friday + // - SATURDAY: Saturday + // - SUNDAY: Sunday + // Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + Day pulumi.StringInput `pulumi:"day"` + // (Output) + // Output only. Duration of the maintenance window. + // The current window is fixed at 1 hour. + // A duration in seconds with up to nine fractional digits, + // terminated by 's'. Example: "3.5s". + Duration pulumi.StringPtrInput `pulumi:"duration"` + // Required. Start time of the window in UTC time. + // Structure is documented below. 
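+ // For illustration only (the values are placeholders within the documented ranges: hours 0-23, minutes and seconds 0-59, nanos 0-999,999,999), a start time of 02:30:00 UTC could be expressed with the `ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs` type defined later in this file:
+ //
+ // ```go
+ // StartTime: &redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs{
+ //	Hours:   pulumi.Int(2),
+ //	Minutes: pulumi.Int(30),
+ //	Seconds: pulumi.Int(0),
+ //	Nanos:   pulumi.Int(0),
+ // },
+ // ```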
+ StartTime ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeInput `pulumi:"startTime"` +} + +func (ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterMaintenancePolicyWeeklyMaintenanceWindow)(nil)).Elem() +} + +func (i ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs) ToClusterMaintenancePolicyWeeklyMaintenanceWindowOutput() ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput { + return i.ToClusterMaintenancePolicyWeeklyMaintenanceWindowOutputWithContext(context.Background()) +} + +func (i ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs) ToClusterMaintenancePolicyWeeklyMaintenanceWindowOutputWithContext(ctx context.Context) ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput) +} + +// ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayInput is an input type that accepts ClusterMaintenancePolicyWeeklyMaintenanceWindowArray and ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput values. +// You can construct a concrete instance of `ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayInput` via: +// +// ClusterMaintenancePolicyWeeklyMaintenanceWindowArray{ ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs{...} } +type ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayInput interface { + pulumi.Input + + ToClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput() ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput + ToClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutputWithContext(context.Context) ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput +} + +type ClusterMaintenancePolicyWeeklyMaintenanceWindowArray []ClusterMaintenancePolicyWeeklyMaintenanceWindowInput + +func (ClusterMaintenancePolicyWeeklyMaintenanceWindowArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]ClusterMaintenancePolicyWeeklyMaintenanceWindow)(nil)).Elem() +} + +func (i ClusterMaintenancePolicyWeeklyMaintenanceWindowArray) ToClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput() ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput { + return i.ToClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutputWithContext(context.Background()) +} + +func (i ClusterMaintenancePolicyWeeklyMaintenanceWindowArray) ToClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutputWithContext(ctx context.Context) ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput) +} + +type ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput struct{ *pulumi.OutputState } + +func (ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterMaintenancePolicyWeeklyMaintenanceWindow)(nil)).Elem() +} + +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput) ToClusterMaintenancePolicyWeeklyMaintenanceWindowOutput() ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput { + return o +} + +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput) ToClusterMaintenancePolicyWeeklyMaintenanceWindowOutputWithContext(ctx context.Context) ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput { + return o +} + +// Required. The day of week that maintenance updates occur. +// - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. 
+// - MONDAY: Monday +// - TUESDAY: Tuesday +// - WEDNESDAY: Wednesday +// - THURSDAY: Thursday +// - FRIDAY: Friday +// - SATURDAY: Saturday +// - SUNDAY: Sunday +// Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput) Day() pulumi.StringOutput { + return o.ApplyT(func(v ClusterMaintenancePolicyWeeklyMaintenanceWindow) string { return v.Day }).(pulumi.StringOutput) +} + +// (Output) +// Output only. Duration of the maintenance window. +// The current window is fixed at 1 hour. +// A duration in seconds with up to nine fractional digits, +// terminated by 's'. Example: "3.5s". +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput) Duration() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterMaintenancePolicyWeeklyMaintenanceWindow) *string { return v.Duration }).(pulumi.StringPtrOutput) +} + +// Required. Start time of the window in UTC time. +// Structure is documented below. +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput) StartTime() ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput { + return o.ApplyT(func(v ClusterMaintenancePolicyWeeklyMaintenanceWindow) ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime { + return v.StartTime + }).(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) +} + +type ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput struct{ *pulumi.OutputState } + +func (ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]ClusterMaintenancePolicyWeeklyMaintenanceWindow)(nil)).Elem() +} + +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput) ToClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput() ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput { + return o +} + +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput) ToClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutputWithContext(ctx context.Context) ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput { + return o +} + +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput) Index(i pulumi.IntInput) ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) ClusterMaintenancePolicyWeeklyMaintenanceWindow { + return vs[0].([]ClusterMaintenancePolicyWeeklyMaintenanceWindow)[vs[1].(int)] + }).(ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput) +} + +type ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime struct { + // Hours of day in 24 hour format. Should be from 0 to 23. + // An API may choose to allow the value "24:00:00" for scenarios like business closing time. + Hours *int `pulumi:"hours"` + // Minutes of hour of day. Must be from 0 to 59. + Minutes *int `pulumi:"minutes"` + // Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + Nanos *int `pulumi:"nanos"` + // Seconds of minutes of the time. Must normally be from 0 to 59. + // An API may allow the value 60 if it allows leap-seconds. + Seconds *int `pulumi:"seconds"` +} + +// ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeInput is an input type that accepts ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs and ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput values. 
+// You can construct a concrete instance of `ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeInput` via: +// +// ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs{...} +type ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeInput interface { + pulumi.Input + + ToClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput() ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput + ToClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutputWithContext(context.Context) ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput +} + +type ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs struct { + // Hours of day in 24 hour format. Should be from 0 to 23. + // An API may choose to allow the value "24:00:00" for scenarios like business closing time. + Hours pulumi.IntPtrInput `pulumi:"hours"` + // Minutes of hour of day. Must be from 0 to 59. + Minutes pulumi.IntPtrInput `pulumi:"minutes"` + // Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + Nanos pulumi.IntPtrInput `pulumi:"nanos"` + // Seconds of minutes of the time. Must normally be from 0 to 59. + // An API may allow the value 60 if it allows leap-seconds. + Seconds pulumi.IntPtrInput `pulumi:"seconds"` +} + +func (ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime)(nil)).Elem() +} + +func (i ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs) ToClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput() ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput { + return i.ToClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutputWithContext(context.Background()) +} + +func (i ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs) ToClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutputWithContext(ctx context.Context) ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) +} + +type ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput struct{ *pulumi.OutputState } + +func (ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime)(nil)).Elem() +} + +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) ToClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput() ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput { + return o +} + +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) ToClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutputWithContext(ctx context.Context) ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput { + return o +} + +// Hours of day in 24 hour format. Should be from 0 to 23. +// An API may choose to allow the value "24:00:00" for scenarios like business closing time. +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Hours() pulumi.IntPtrOutput { + return o.ApplyT(func(v ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Hours }).(pulumi.IntPtrOutput) +} + +// Minutes of hour of day. Must be from 0 to 59. 
+func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Minutes() pulumi.IntPtrOutput { + return o.ApplyT(func(v ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Minutes }).(pulumi.IntPtrOutput) +} + +// Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Nanos() pulumi.IntPtrOutput { + return o.ApplyT(func(v ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Nanos }).(pulumi.IntPtrOutput) +} + +// Seconds of minutes of the time. Must normally be from 0 to 59. +// An API may allow the value 60 if it allows leap-seconds. +func (o ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput) Seconds() pulumi.IntPtrOutput { + return o.ApplyT(func(v ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime) *int { return v.Seconds }).(pulumi.IntPtrOutput) +} + +type ClusterMaintenanceSchedule struct { + // (Output) + // Output only. The end time of any upcoming scheduled maintenance for this cluster. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + EndTime *string `pulumi:"endTime"` + // (Output) + // Output only. The deadline that the maintenance schedule start time + // can not go beyond, including reschedule. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + ScheduleDeadlineTime *string `pulumi:"scheduleDeadlineTime"` + // (Output) + // Output only. The start time of any upcoming scheduled maintenance for this cluster. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + StartTime *string `pulumi:"startTime"` +} + +// ClusterMaintenanceScheduleInput is an input type that accepts ClusterMaintenanceScheduleArgs and ClusterMaintenanceScheduleOutput values. +// You can construct a concrete instance of `ClusterMaintenanceScheduleInput` via: +// +// ClusterMaintenanceScheduleArgs{...} +type ClusterMaintenanceScheduleInput interface { + pulumi.Input + + ToClusterMaintenanceScheduleOutput() ClusterMaintenanceScheduleOutput + ToClusterMaintenanceScheduleOutputWithContext(context.Context) ClusterMaintenanceScheduleOutput +} + +type ClusterMaintenanceScheduleArgs struct { + // (Output) + // Output only. The end time of any upcoming scheduled maintenance for this cluster. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + EndTime pulumi.StringPtrInput `pulumi:"endTime"` + // (Output) + // Output only. The deadline that the maintenance schedule start time + // can not go beyond, including reschedule. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. + ScheduleDeadlineTime pulumi.StringPtrInput `pulumi:"scheduleDeadlineTime"` + // (Output) + // Output only. The start time of any upcoming scheduled maintenance for this cluster. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + // resolution and up to nine fractional digits. 
+ StartTime pulumi.StringPtrInput `pulumi:"startTime"` +} + +func (ClusterMaintenanceScheduleArgs) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterMaintenanceSchedule)(nil)).Elem() +} + +func (i ClusterMaintenanceScheduleArgs) ToClusterMaintenanceScheduleOutput() ClusterMaintenanceScheduleOutput { + return i.ToClusterMaintenanceScheduleOutputWithContext(context.Background()) +} + +func (i ClusterMaintenanceScheduleArgs) ToClusterMaintenanceScheduleOutputWithContext(ctx context.Context) ClusterMaintenanceScheduleOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterMaintenanceScheduleOutput) +} + +// ClusterMaintenanceScheduleArrayInput is an input type that accepts ClusterMaintenanceScheduleArray and ClusterMaintenanceScheduleArrayOutput values. +// You can construct a concrete instance of `ClusterMaintenanceScheduleArrayInput` via: +// +// ClusterMaintenanceScheduleArray{ ClusterMaintenanceScheduleArgs{...} } +type ClusterMaintenanceScheduleArrayInput interface { + pulumi.Input + + ToClusterMaintenanceScheduleArrayOutput() ClusterMaintenanceScheduleArrayOutput + ToClusterMaintenanceScheduleArrayOutputWithContext(context.Context) ClusterMaintenanceScheduleArrayOutput +} + +type ClusterMaintenanceScheduleArray []ClusterMaintenanceScheduleInput + +func (ClusterMaintenanceScheduleArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]ClusterMaintenanceSchedule)(nil)).Elem() +} + +func (i ClusterMaintenanceScheduleArray) ToClusterMaintenanceScheduleArrayOutput() ClusterMaintenanceScheduleArrayOutput { + return i.ToClusterMaintenanceScheduleArrayOutputWithContext(context.Background()) +} + +func (i ClusterMaintenanceScheduleArray) ToClusterMaintenanceScheduleArrayOutputWithContext(ctx context.Context) ClusterMaintenanceScheduleArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(ClusterMaintenanceScheduleArrayOutput) +} + +type ClusterMaintenanceScheduleOutput struct{ *pulumi.OutputState } + +func (ClusterMaintenanceScheduleOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ClusterMaintenanceSchedule)(nil)).Elem() +} + +func (o ClusterMaintenanceScheduleOutput) ToClusterMaintenanceScheduleOutput() ClusterMaintenanceScheduleOutput { + return o +} + +func (o ClusterMaintenanceScheduleOutput) ToClusterMaintenanceScheduleOutputWithContext(ctx context.Context) ClusterMaintenanceScheduleOutput { + return o +} + +// (Output) +// Output only. The end time of any upcoming scheduled maintenance for this cluster. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +// resolution and up to nine fractional digits. +func (o ClusterMaintenanceScheduleOutput) EndTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterMaintenanceSchedule) *string { return v.EndTime }).(pulumi.StringPtrOutput) +} + +// (Output) +// Output only. The deadline that the maintenance schedule start time +// can not go beyond, including reschedule. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +// resolution and up to nine fractional digits. +func (o ClusterMaintenanceScheduleOutput) ScheduleDeadlineTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterMaintenanceSchedule) *string { return v.ScheduleDeadlineTime }).(pulumi.StringPtrOutput) +} + +// (Output) +// Output only. The start time of any upcoming scheduled maintenance for this cluster. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond +// resolution and up to nine fractional digits. 
+func (o ClusterMaintenanceScheduleOutput) StartTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v ClusterMaintenanceSchedule) *string { return v.StartTime }).(pulumi.StringPtrOutput) +} + +type ClusterMaintenanceScheduleArrayOutput struct{ *pulumi.OutputState } + +func (ClusterMaintenanceScheduleArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]ClusterMaintenanceSchedule)(nil)).Elem() +} + +func (o ClusterMaintenanceScheduleArrayOutput) ToClusterMaintenanceScheduleArrayOutput() ClusterMaintenanceScheduleArrayOutput { + return o +} + +func (o ClusterMaintenanceScheduleArrayOutput) ToClusterMaintenanceScheduleArrayOutputWithContext(ctx context.Context) ClusterMaintenanceScheduleArrayOutput { + return o +} + +func (o ClusterMaintenanceScheduleArrayOutput) Index(i pulumi.IntInput) ClusterMaintenanceScheduleOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) ClusterMaintenanceSchedule { + return vs[0].([]ClusterMaintenanceSchedule)[vs[1].(int)] + }).(ClusterMaintenanceScheduleOutput) +} + type ClusterPscConfig struct { // Required. The consumer network where the network address of // the discovery endpoint will be reserved, in the form of @@ -3069,6 +3671,13 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*ClusterDiscoveryEndpointArrayInput)(nil)).Elem(), ClusterDiscoveryEndpointArray{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterDiscoveryEndpointPscConfigInput)(nil)).Elem(), ClusterDiscoveryEndpointPscConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterDiscoveryEndpointPscConfigPtrInput)(nil)).Elem(), ClusterDiscoveryEndpointPscConfigArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterMaintenancePolicyInput)(nil)).Elem(), ClusterMaintenancePolicyArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterMaintenancePolicyPtrInput)(nil)).Elem(), ClusterMaintenancePolicyArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterMaintenancePolicyWeeklyMaintenanceWindowInput)(nil)).Elem(), ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayInput)(nil)).Elem(), ClusterMaintenancePolicyWeeklyMaintenanceWindowArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeInput)(nil)).Elem(), ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterMaintenanceScheduleInput)(nil)).Elem(), ClusterMaintenanceScheduleArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*ClusterMaintenanceScheduleArrayInput)(nil)).Elem(), ClusterMaintenanceScheduleArray{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterPscConfigInput)(nil)).Elem(), ClusterPscConfigArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterPscConfigArrayInput)(nil)).Elem(), ClusterPscConfigArray{}) pulumi.RegisterInputType(reflect.TypeOf((*ClusterPscConnectionInput)(nil)).Elem(), ClusterPscConnectionArgs{}) @@ -3110,6 +3719,13 @@ func init() { pulumi.RegisterOutputType(ClusterDiscoveryEndpointArrayOutput{}) pulumi.RegisterOutputType(ClusterDiscoveryEndpointPscConfigOutput{}) pulumi.RegisterOutputType(ClusterDiscoveryEndpointPscConfigPtrOutput{}) + pulumi.RegisterOutputType(ClusterMaintenancePolicyOutput{}) + pulumi.RegisterOutputType(ClusterMaintenancePolicyPtrOutput{}) + pulumi.RegisterOutputType(ClusterMaintenancePolicyWeeklyMaintenanceWindowOutput{}) + pulumi.RegisterOutputType(ClusterMaintenancePolicyWeeklyMaintenanceWindowArrayOutput{}) + 
pulumi.RegisterOutputType(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeOutput{}) + pulumi.RegisterOutputType(ClusterMaintenanceScheduleOutput{}) + pulumi.RegisterOutputType(ClusterMaintenanceScheduleArrayOutput{}) pulumi.RegisterOutputType(ClusterPscConfigOutput{}) pulumi.RegisterOutputType(ClusterPscConfigArrayOutput{}) pulumi.RegisterOutputType(ClusterPscConnectionOutput{}) diff --git a/sdk/go/gcp/securitycenter/init.go b/sdk/go/gcp/securitycenter/init.go index c0d51731a2..202c73d8da 100644 --- a/sdk/go/gcp/securitycenter/init.go +++ b/sdk/go/gcp/securitycenter/init.go @@ -63,6 +63,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &V2FolderMuteConfig{} case "gcp:securitycenter/v2FolderNotificationConfig:V2FolderNotificationConfig": r = &V2FolderNotificationConfig{} + case "gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport": + r = &V2FolderSccBigQueryExport{} case "gcp:securitycenter/v2OrganizationMuteConfig:V2OrganizationMuteConfig": r = &V2OrganizationMuteConfig{} case "gcp:securitycenter/v2OrganizationNotificationConfig:V2OrganizationNotificationConfig": @@ -81,6 +83,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &V2ProjectMuteConfig{} case "gcp:securitycenter/v2ProjectNotificationConfig:V2ProjectNotificationConfig": r = &V2ProjectNotificationConfig{} + case "gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport": + r = &V2ProjectSccBigQueryExport{} default: return nil, fmt.Errorf("unknown resource type: %s", typ) } @@ -199,6 +203,11 @@ func init() { "securitycenter/v2FolderNotificationConfig", &module{version}, ) + pulumi.RegisterResourceModule( + "gcp", + "securitycenter/v2FolderSccBigQueryExport", + &module{version}, + ) pulumi.RegisterResourceModule( "gcp", "securitycenter/v2OrganizationMuteConfig", @@ -244,4 +253,9 @@ func init() { "securitycenter/v2ProjectNotificationConfig", &module{version}, ) + pulumi.RegisterResourceModule( + "gcp", + "securitycenter/v2ProjectSccBigQueryExport", + &module{version}, + ) } diff --git a/sdk/go/gcp/securitycenter/v2folderSccBigQueryExport.go b/sdk/go/gcp/securitycenter/v2folderSccBigQueryExport.go new file mode 100644 index 0000000000..cc909d8f7a --- /dev/null +++ b/sdk/go/gcp/securitycenter/v2folderSccBigQueryExport.go @@ -0,0 +1,616 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package securitycenter + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// A Cloud Security Command Center (Cloud SCC) Big Query Export Config. +// It represents exporting Security Command Center data, including assets, findings, and security marks +// using gcloud scc bqexports +// > **Note:** In order to use Cloud SCC resources, your organization must be enrolled +// in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). +// Without doing so, you may run into errors during resource creation. 
+//
+// To get more information about FolderSccBigQueryExport, see:
+//
+// * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports)
+// * How-to Guides
+// - [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query)
+//
+// ## Example Usage
+//
+// ### Scc V2 Folder Big Query Export Config Basic
+//
+// ```go
+// package main
+//
+// import (
+//
+// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
+// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
+// "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/securitycenter"
+// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+//
+// )
+//
+// func main() {
+// pulumi.Run(func(ctx *pulumi.Context) error {
+// folder, err := organizations.NewFolder(ctx, "folder", &organizations.FolderArgs{
+// Parent: pulumi.String("organizations/123456789"),
+// DisplayName: pulumi.String("folder-name"),
+// DeletionProtection: pulumi.Bool(false),
+// })
+// if err != nil {
+// return err
+// }
+// _default, err := bigquery.NewDataset(ctx, "default", &bigquery.DatasetArgs{
+// DatasetId: pulumi.String("my_dataset_id"),
+// FriendlyName: pulumi.String("test"),
+// Description: pulumi.String("This is a test description"),
+// Location: pulumi.String("US"),
+// DefaultTableExpirationMs: pulumi.Int(3600000),
+// DefaultPartitionExpirationMs: nil,
+// Labels: pulumi.StringMap{
+// "env": pulumi.String("default"),
+// },
+// })
+// if err != nil {
+// return err
+// }
+// _, err = securitycenter.NewV2FolderSccBigQueryExport(ctx, "custom_big_query_export_config", &securitycenter.V2FolderSccBigQueryExportArgs{
+// BigQueryExportId: pulumi.String("my-export"),
+// Folder: folder.FolderId,
+// Dataset: _default.ID(),
+// Location: pulumi.String("global"),
+// Description: pulumi.String("Cloud Security Command Center Findings Big Query Export Config"),
+// Filter: pulumi.String("state=\"ACTIVE\" AND NOT mute=\"MUTED\""),
+// })
+// if err != nil {
+// return err
+// }
+// return nil
+// })
+// }
+//
+// ```
+//
+// ## Import
+//
+// FolderSccBigQueryExport can be imported using any of these accepted formats:
+//
+// * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`
+//
+// * `{{folder}}/{{location}}/{{big_query_export_id}}`
+//
+// When using the `pulumi import` command, FolderSccBigQueryExport can be imported using one of the formats above. For example:
+//
+// ```sh
+// $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}
+// ```
+//
+// ```sh
+// $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default {{folder}}/{{location}}/{{big_query_export_id}}
+// ```
+type V2FolderSccBigQueryExport struct {
+ pulumi.CustomResourceState
+
+ // This must be unique within the organization. It must consist of only lowercase letters,
+ // numbers, and hyphens, must start with a letter, must end with either a letter or a number,
+ // and must be 63 characters or less.
+ //
+ // ***
+ BigQueryExportId pulumi.StringOutput `pulumi:"bigQueryExportId"`
+ // The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation.
+ // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
+ // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + CreateTime pulumi.StringOutput `pulumi:"createTime"` + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset pulumi.StringPtrOutput `pulumi:"dataset"` + // The description of the notification config (max of 1024 characters). + Description pulumi.StringPtrOutput `pulumi:"description"` + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. + // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. + // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. + Filter pulumi.StringPtrOutput `pulumi:"filter"` + // The folder where Cloud Security Command Center Big Query Export + // Config lives in. + Folder pulumi.StringOutput `pulumi:"folder"` + // The BigQuery export configuration is stored in this location. If not provided, Use global as default. + Location pulumi.StringPtrOutput `pulumi:"location"` + // Email address of the user who last edited the BigQuery export. + // This field is set by the server and will be ignored if provided on export creation or update. + MostRecentEditor pulumi.StringOutput `pulumi:"mostRecentEditor"` + // The resource name of this export, in the format + // `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + // This field is provided in responses, and is ignored when provided in create requests. + Name pulumi.StringOutput `pulumi:"name"` + // The service account that needs permission to create table and upload data to the BigQuery dataset. + Principal pulumi.StringOutput `pulumi:"principal"` + // The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + UpdateTime pulumi.StringOutput `pulumi:"updateTime"` +} + +// NewV2FolderSccBigQueryExport registers a new resource with the given unique name, arguments, and options. 
+func NewV2FolderSccBigQueryExport(ctx *pulumi.Context, + name string, args *V2FolderSccBigQueryExportArgs, opts ...pulumi.ResourceOption) (*V2FolderSccBigQueryExport, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.BigQueryExportId == nil { + return nil, errors.New("invalid value for required argument 'BigQueryExportId'") + } + if args.Folder == nil { + return nil, errors.New("invalid value for required argument 'Folder'") + } + opts = internal.PkgResourceDefaultOpts(opts) + var resource V2FolderSccBigQueryExport + err := ctx.RegisterResource("gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetV2FolderSccBigQueryExport gets an existing V2FolderSccBigQueryExport resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetV2FolderSccBigQueryExport(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *V2FolderSccBigQueryExportState, opts ...pulumi.ResourceOption) (*V2FolderSccBigQueryExport, error) { + var resource V2FolderSccBigQueryExport + err := ctx.ReadResource("gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering V2FolderSccBigQueryExport resources. +type v2folderSccBigQueryExportState struct { + // This must be unique within the organization. It must consist of only lowercase letters, + // numbers, and hyphens, must start with a letter, must end with either a letter or a number, + // and must be 63 characters or less. + // + // *** + BigQueryExportId *string `pulumi:"bigQueryExportId"` + // The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + CreateTime *string `pulumi:"createTime"` + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset *string `pulumi:"dataset"` + // The description of the notification config (max of 1024 characters). + Description *string `pulumi:"description"` + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. + // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. 
+ // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. + Filter *string `pulumi:"filter"` + // The folder where Cloud Security Command Center Big Query Export + // Config lives in. + Folder *string `pulumi:"folder"` + // The BigQuery export configuration is stored in this location. If not provided, Use global as default. + Location *string `pulumi:"location"` + // Email address of the user who last edited the BigQuery export. + // This field is set by the server and will be ignored if provided on export creation or update. + MostRecentEditor *string `pulumi:"mostRecentEditor"` + // The resource name of this export, in the format + // `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + // This field is provided in responses, and is ignored when provided in create requests. + Name *string `pulumi:"name"` + // The service account that needs permission to create table and upload data to the BigQuery dataset. + Principal *string `pulumi:"principal"` + // The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + UpdateTime *string `pulumi:"updateTime"` +} + +type V2FolderSccBigQueryExportState struct { + // This must be unique within the organization. It must consist of only lowercase letters, + // numbers, and hyphens, must start with a letter, must end with either a letter or a number, + // and must be 63 characters or less. + // + // *** + BigQueryExportId pulumi.StringPtrInput + // The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + CreateTime pulumi.StringPtrInput + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset pulumi.StringPtrInput + // The description of the notification config (max of 1024 characters). + Description pulumi.StringPtrInput + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. + // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. + // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. 
+ Filter pulumi.StringPtrInput + // The folder where Cloud Security Command Center Big Query Export + // Config lives in. + Folder pulumi.StringPtrInput + // The BigQuery export configuration is stored in this location. If not provided, Use global as default. + Location pulumi.StringPtrInput + // Email address of the user who last edited the BigQuery export. + // This field is set by the server and will be ignored if provided on export creation or update. + MostRecentEditor pulumi.StringPtrInput + // The resource name of this export, in the format + // `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + // This field is provided in responses, and is ignored when provided in create requests. + Name pulumi.StringPtrInput + // The service account that needs permission to create table and upload data to the BigQuery dataset. + Principal pulumi.StringPtrInput + // The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + UpdateTime pulumi.StringPtrInput +} + +func (V2FolderSccBigQueryExportState) ElementType() reflect.Type { + return reflect.TypeOf((*v2folderSccBigQueryExportState)(nil)).Elem() +} + +type v2folderSccBigQueryExportArgs struct { + // This must be unique within the organization. It must consist of only lowercase letters, + // numbers, and hyphens, must start with a letter, must end with either a letter or a number, + // and must be 63 characters or less. + // + // *** + BigQueryExportId string `pulumi:"bigQueryExportId"` + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset *string `pulumi:"dataset"` + // The description of the notification config (max of 1024 characters). + Description *string `pulumi:"description"` + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. + // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. + // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. + Filter *string `pulumi:"filter"` + // The folder where Cloud Security Command Center Big Query Export + // Config lives in. + Folder string `pulumi:"folder"` + // The BigQuery export configuration is stored in this location. If not provided, Use global as default. + Location *string `pulumi:"location"` +} + +// The set of arguments for constructing a V2FolderSccBigQueryExport resource. 
+type V2FolderSccBigQueryExportArgs struct { + // This must be unique within the organization. It must consist of only lowercase letters, + // numbers, and hyphens, must start with a letter, must end with either a letter or a number, + // and must be 63 characters or less. + // + // *** + BigQueryExportId pulumi.StringInput + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset pulumi.StringPtrInput + // The description of the notification config (max of 1024 characters). + Description pulumi.StringPtrInput + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. + // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. + // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. + Filter pulumi.StringPtrInput + // The folder where Cloud Security Command Center Big Query Export + // Config lives in. + Folder pulumi.StringInput + // The BigQuery export configuration is stored in this location. If not provided, Use global as default. + Location pulumi.StringPtrInput +} + +func (V2FolderSccBigQueryExportArgs) ElementType() reflect.Type { + return reflect.TypeOf((*v2folderSccBigQueryExportArgs)(nil)).Elem() +} + +type V2FolderSccBigQueryExportInput interface { + pulumi.Input + + ToV2FolderSccBigQueryExportOutput() V2FolderSccBigQueryExportOutput + ToV2FolderSccBigQueryExportOutputWithContext(ctx context.Context) V2FolderSccBigQueryExportOutput +} + +func (*V2FolderSccBigQueryExport) ElementType() reflect.Type { + return reflect.TypeOf((**V2FolderSccBigQueryExport)(nil)).Elem() +} + +func (i *V2FolderSccBigQueryExport) ToV2FolderSccBigQueryExportOutput() V2FolderSccBigQueryExportOutput { + return i.ToV2FolderSccBigQueryExportOutputWithContext(context.Background()) +} + +func (i *V2FolderSccBigQueryExport) ToV2FolderSccBigQueryExportOutputWithContext(ctx context.Context) V2FolderSccBigQueryExportOutput { + return pulumi.ToOutputWithContext(ctx, i).(V2FolderSccBigQueryExportOutput) +} + +// V2FolderSccBigQueryExportArrayInput is an input type that accepts V2FolderSccBigQueryExportArray and V2FolderSccBigQueryExportArrayOutput values. 
+// You can construct a concrete instance of `V2FolderSccBigQueryExportArrayInput` via: +// +// V2FolderSccBigQueryExportArray{ V2FolderSccBigQueryExportArgs{...} } +type V2FolderSccBigQueryExportArrayInput interface { + pulumi.Input + + ToV2FolderSccBigQueryExportArrayOutput() V2FolderSccBigQueryExportArrayOutput + ToV2FolderSccBigQueryExportArrayOutputWithContext(context.Context) V2FolderSccBigQueryExportArrayOutput +} + +type V2FolderSccBigQueryExportArray []V2FolderSccBigQueryExportInput + +func (V2FolderSccBigQueryExportArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*V2FolderSccBigQueryExport)(nil)).Elem() +} + +func (i V2FolderSccBigQueryExportArray) ToV2FolderSccBigQueryExportArrayOutput() V2FolderSccBigQueryExportArrayOutput { + return i.ToV2FolderSccBigQueryExportArrayOutputWithContext(context.Background()) +} + +func (i V2FolderSccBigQueryExportArray) ToV2FolderSccBigQueryExportArrayOutputWithContext(ctx context.Context) V2FolderSccBigQueryExportArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(V2FolderSccBigQueryExportArrayOutput) +} + +// V2FolderSccBigQueryExportMapInput is an input type that accepts V2FolderSccBigQueryExportMap and V2FolderSccBigQueryExportMapOutput values. +// You can construct a concrete instance of `V2FolderSccBigQueryExportMapInput` via: +// +// V2FolderSccBigQueryExportMap{ "key": V2FolderSccBigQueryExportArgs{...} } +type V2FolderSccBigQueryExportMapInput interface { + pulumi.Input + + ToV2FolderSccBigQueryExportMapOutput() V2FolderSccBigQueryExportMapOutput + ToV2FolderSccBigQueryExportMapOutputWithContext(context.Context) V2FolderSccBigQueryExportMapOutput +} + +type V2FolderSccBigQueryExportMap map[string]V2FolderSccBigQueryExportInput + +func (V2FolderSccBigQueryExportMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*V2FolderSccBigQueryExport)(nil)).Elem() +} + +func (i V2FolderSccBigQueryExportMap) ToV2FolderSccBigQueryExportMapOutput() V2FolderSccBigQueryExportMapOutput { + return i.ToV2FolderSccBigQueryExportMapOutputWithContext(context.Background()) +} + +func (i V2FolderSccBigQueryExportMap) ToV2FolderSccBigQueryExportMapOutputWithContext(ctx context.Context) V2FolderSccBigQueryExportMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(V2FolderSccBigQueryExportMapOutput) +} + +type V2FolderSccBigQueryExportOutput struct{ *pulumi.OutputState } + +func (V2FolderSccBigQueryExportOutput) ElementType() reflect.Type { + return reflect.TypeOf((**V2FolderSccBigQueryExport)(nil)).Elem() +} + +func (o V2FolderSccBigQueryExportOutput) ToV2FolderSccBigQueryExportOutput() V2FolderSccBigQueryExportOutput { + return o +} + +func (o V2FolderSccBigQueryExportOutput) ToV2FolderSccBigQueryExportOutputWithContext(ctx context.Context) V2FolderSccBigQueryExportOutput { + return o +} + +// This must be unique within the organization. It must consist of only lowercase letters, +// numbers, and hyphens, must start with a letter, must end with either a letter or a number, +// and must be 63 characters or less. +// +// *** +func (o V2FolderSccBigQueryExportOutput) BigQueryExportId() pulumi.StringOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringOutput { return v.BigQueryExportId }).(pulumi.StringOutput) +} + +// The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". +func (o V2FolderSccBigQueryExportOutput) CreateTime() pulumi.StringOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringOutput { return v.CreateTime }).(pulumi.StringOutput) +} + +// The dataset to write findings' updates to. +// Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". +// BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). +func (o V2FolderSccBigQueryExportOutput) Dataset() pulumi.StringPtrOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringPtrOutput { return v.Dataset }).(pulumi.StringPtrOutput) +} + +// The description of the notification config (max of 1024 characters). +func (o V2FolderSccBigQueryExportOutput) Description() pulumi.StringPtrOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringPtrOutput { return v.Description }).(pulumi.StringPtrOutput) +} + +// Expression that defines the filter to apply across create/update +// events of findings. The +// expression is a list of zero or more restrictions combined via +// logical operators AND and OR. Parentheses are supported, and OR +// has higher precedence than AND. +// Restrictions have the form and may have +// a - character in front of them to indicate negation. The fields +// map to those defined in the corresponding resource. +// The supported operators are: +// - = for all value types. +// - > , <, >=, <= for integer values. +// - :, meaning substring matching, for strings. +// The supported value types are: +// - string literals in quotes. +// - integer literals without quotes. +// - boolean literals true and false without quotes. +// See +// [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) +// for information on how to write a filter. +func (o V2FolderSccBigQueryExportOutput) Filter() pulumi.StringPtrOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringPtrOutput { return v.Filter }).(pulumi.StringPtrOutput) +} + +// The folder where Cloud Security Command Center Big Query Export +// Config lives in. +func (o V2FolderSccBigQueryExportOutput) Folder() pulumi.StringOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringOutput { return v.Folder }).(pulumi.StringOutput) +} + +// The BigQuery export configuration is stored in this location. If not provided, Use global as default. +func (o V2FolderSccBigQueryExportOutput) Location() pulumi.StringPtrOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringPtrOutput { return v.Location }).(pulumi.StringPtrOutput) +} + +// Email address of the user who last edited the BigQuery export. +// This field is set by the server and will be ignored if provided on export creation or update. +func (o V2FolderSccBigQueryExportOutput) MostRecentEditor() pulumi.StringOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringOutput { return v.MostRecentEditor }).(pulumi.StringOutput) +} + +// The resource name of this export, in the format +// `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. +// This field is provided in responses, and is ignored when provided in create requests. 
+func (o V2FolderSccBigQueryExportOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) +} + +// The service account that needs permission to create table and upload data to the BigQuery dataset. +func (o V2FolderSccBigQueryExportOutput) Principal() pulumi.StringOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringOutput { return v.Principal }).(pulumi.StringOutput) +} + +// The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". +func (o V2FolderSccBigQueryExportOutput) UpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v *V2FolderSccBigQueryExport) pulumi.StringOutput { return v.UpdateTime }).(pulumi.StringOutput) +} + +type V2FolderSccBigQueryExportArrayOutput struct{ *pulumi.OutputState } + +func (V2FolderSccBigQueryExportArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*V2FolderSccBigQueryExport)(nil)).Elem() +} + +func (o V2FolderSccBigQueryExportArrayOutput) ToV2FolderSccBigQueryExportArrayOutput() V2FolderSccBigQueryExportArrayOutput { + return o +} + +func (o V2FolderSccBigQueryExportArrayOutput) ToV2FolderSccBigQueryExportArrayOutputWithContext(ctx context.Context) V2FolderSccBigQueryExportArrayOutput { + return o +} + +func (o V2FolderSccBigQueryExportArrayOutput) Index(i pulumi.IntInput) V2FolderSccBigQueryExportOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *V2FolderSccBigQueryExport { + return vs[0].([]*V2FolderSccBigQueryExport)[vs[1].(int)] + }).(V2FolderSccBigQueryExportOutput) +} + +type V2FolderSccBigQueryExportMapOutput struct{ *pulumi.OutputState } + +func (V2FolderSccBigQueryExportMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*V2FolderSccBigQueryExport)(nil)).Elem() +} + +func (o V2FolderSccBigQueryExportMapOutput) ToV2FolderSccBigQueryExportMapOutput() V2FolderSccBigQueryExportMapOutput { + return o +} + +func (o V2FolderSccBigQueryExportMapOutput) ToV2FolderSccBigQueryExportMapOutputWithContext(ctx context.Context) V2FolderSccBigQueryExportMapOutput { + return o +} + +func (o V2FolderSccBigQueryExportMapOutput) MapIndex(k pulumi.StringInput) V2FolderSccBigQueryExportOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *V2FolderSccBigQueryExport { + return vs[0].(map[string]*V2FolderSccBigQueryExport)[vs[1].(string)] + }).(V2FolderSccBigQueryExportOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*V2FolderSccBigQueryExportInput)(nil)).Elem(), &V2FolderSccBigQueryExport{}) + pulumi.RegisterInputType(reflect.TypeOf((*V2FolderSccBigQueryExportArrayInput)(nil)).Elem(), V2FolderSccBigQueryExportArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*V2FolderSccBigQueryExportMapInput)(nil)).Elem(), V2FolderSccBigQueryExportMap{}) + pulumi.RegisterOutputType(V2FolderSccBigQueryExportOutput{}) + pulumi.RegisterOutputType(V2FolderSccBigQueryExportArrayOutput{}) + pulumi.RegisterOutputType(V2FolderSccBigQueryExportMapOutput{}) +} diff --git a/sdk/go/gcp/securitycenter/v2organizationSccBigQueryExports.go b/sdk/go/gcp/securitycenter/v2organizationSccBigQueryExports.go index 888425523a..a3159e2e9b 100644 --- a/sdk/go/gcp/securitycenter/v2organizationSccBigQueryExports.go +++ 
b/sdk/go/gcp/securitycenter/v2organizationSccBigQueryExports.go @@ -43,7 +43,7 @@ import ( // func main() { // pulumi.Run(func(ctx *pulumi.Context) error { // _, err := bigquery.NewDataset(ctx, "default", &bigquery.DatasetArgs{ -// DatasetId: pulumi.String("my_dataset_id"), +// DatasetId: pulumi.String(""), // FriendlyName: pulumi.String("test"), // Description: pulumi.String("This is a test description"), // Location: pulumi.String("US"), @@ -60,7 +60,7 @@ import ( // Name: pulumi.String("my-export"), // BigQueryExportId: pulumi.String("my-export"), // Organization: pulumi.String("123456789"), -// Dataset: pulumi.String("my-dataset"), +// Dataset: _default.ID(), // Location: pulumi.String("global"), // Description: pulumi.String("Cloud Security Command Center Findings Big Query Export Config"), // Filter: pulumi.String("state=\"ACTIVE\" AND NOT mute=\"MUTED\""), diff --git a/sdk/go/gcp/securitycenter/v2projectSccBigQueryExport.go b/sdk/go/gcp/securitycenter/v2projectSccBigQueryExport.go new file mode 100644 index 0000000000..8d22bb6b01 --- /dev/null +++ b/sdk/go/gcp/securitycenter/v2projectSccBigQueryExport.go @@ -0,0 +1,552 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package securitycenter + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// A Cloud Security Command Center (Cloud SCC) Big Query Export Config. +// It represents exporting Security Command Center data, including assets, findings, and security marks +// using gcloud scc bqexports +// > **Note:** In order to use Cloud SCC resources, your organization must be enrolled +// in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). +// Without doing so, you may run into errors during resource creation. +// +// To get more information about ProjectSccBigQueryExport, see: +// +// * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.bigQueryExports) +// * How-to Guides +// - [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) +// +// ## Example Usage +// +// ## Import +// +// ProjectSccBigQueryExport can be imported using any of these accepted formats: +// +// * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` +// +// * `{{project}}/{{location}}/{{big_query_export_id}}` +// +// * `{{location}}/{{big_query_export_id}}` +// +// When using the `pulumi import` command, ProjectSccBigQueryExport can be imported using one of the formats above. For example: +// +// ```sh +// $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} +// ``` +// +// ```sh +// $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{project}}/{{location}}/{{big_query_export_id}} +// ``` +// +// ```sh +// $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{location}}/{{big_query_export_id}} +// ``` +type V2ProjectSccBigQueryExport struct { + pulumi.CustomResourceState + + // This must be unique within the organization. 
+ // + // *** + BigQueryExportId pulumi.StringOutput `pulumi:"bigQueryExportId"` + // The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + CreateTime pulumi.StringOutput `pulumi:"createTime"` + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset pulumi.StringPtrOutput `pulumi:"dataset"` + // The description of the notification config (max of 1024 characters). + Description pulumi.StringPtrOutput `pulumi:"description"` + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. + // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. + // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. + Filter pulumi.StringPtrOutput `pulumi:"filter"` + // location Id is provided by organization. If not provided, Use global as default. + Location pulumi.StringPtrOutput `pulumi:"location"` + // Email address of the user who last edited the BigQuery export. + // This field is set by the server and will be ignored if provided on export creation or update. + MostRecentEditor pulumi.StringOutput `pulumi:"mostRecentEditor"` + // The resource name of this export, in the format + // `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + // This field is provided in responses, and is ignored when provided in create requests. + Name pulumi.StringOutput `pulumi:"name"` + // The service account that needs permission to create table and upload data to the BigQuery dataset. + Principal pulumi.StringOutput `pulumi:"principal"` + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project pulumi.StringOutput `pulumi:"project"` + // The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + UpdateTime pulumi.StringOutput `pulumi:"updateTime"` +} + +// NewV2ProjectSccBigQueryExport registers a new resource with the given unique name, arguments, and options. 
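// Editor's note: the generated docs above leave "Example Usage" empty. What follows is a
// minimal, hedged usage sketch and not generated output: the project ID, export ID and
// dataset name are placeholder assumptions, and the dataset wiring simply mirrors the
// organization-level example earlier in this patch.
//
//	package main
//
//	import (
//		"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/bigquery"
//		"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/securitycenter"
//		"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
//	)
//
//	func main() {
//		pulumi.Run(func(ctx *pulumi.Context) error {
//			// Dataset that will receive findings; the dataset ID may only use
//			// letters, numbers and underscores.
//			dataset, err := bigquery.NewDataset(ctx, "scc-export", &bigquery.DatasetArgs{
//				DatasetId: pulumi.String("scc_findings_export"),
//				Location:  pulumi.String("US"),
//			})
//			if err != nil {
//				return err
//			}
//			// BigQueryExportId is the only required argument; Dataset accepts the
//			// dataset's ID output, as in the organization-level example above.
//			_, err = securitycenter.NewV2ProjectSccBigQueryExport(ctx, "example", &securitycenter.V2ProjectSccBigQueryExportArgs{
//				BigQueryExportId: pulumi.String("my-export"),
//				Project:          pulumi.String("my-project-id"),
//				Location:         pulumi.String("global"),
//				Dataset:          dataset.ID(),
//				Filter:           pulumi.String("state=\"ACTIVE\" AND NOT mute=\"MUTED\""),
//			})
//			return err
//		})
//	}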
+func NewV2ProjectSccBigQueryExport(ctx *pulumi.Context, + name string, args *V2ProjectSccBigQueryExportArgs, opts ...pulumi.ResourceOption) (*V2ProjectSccBigQueryExport, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.BigQueryExportId == nil { + return nil, errors.New("invalid value for required argument 'BigQueryExportId'") + } + opts = internal.PkgResourceDefaultOpts(opts) + var resource V2ProjectSccBigQueryExport + err := ctx.RegisterResource("gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetV2ProjectSccBigQueryExport gets an existing V2ProjectSccBigQueryExport resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetV2ProjectSccBigQueryExport(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *V2ProjectSccBigQueryExportState, opts ...pulumi.ResourceOption) (*V2ProjectSccBigQueryExport, error) { + var resource V2ProjectSccBigQueryExport + err := ctx.ReadResource("gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering V2ProjectSccBigQueryExport resources. +type v2projectSccBigQueryExportState struct { + // This must be unique within the organization. + // + // *** + BigQueryExportId *string `pulumi:"bigQueryExportId"` + // The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + CreateTime *string `pulumi:"createTime"` + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset *string `pulumi:"dataset"` + // The description of the notification config (max of 1024 characters). + Description *string `pulumi:"description"` + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. + // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. + // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. + Filter *string `pulumi:"filter"` + // location Id is provided by organization. If not provided, Use global as default. + Location *string `pulumi:"location"` + // Email address of the user who last edited the BigQuery export. 
+ // This field is set by the server and will be ignored if provided on export creation or update. + MostRecentEditor *string `pulumi:"mostRecentEditor"` + // The resource name of this export, in the format + // `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + // This field is provided in responses, and is ignored when provided in create requests. + Name *string `pulumi:"name"` + // The service account that needs permission to create table and upload data to the BigQuery dataset. + Principal *string `pulumi:"principal"` + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project *string `pulumi:"project"` + // The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + UpdateTime *string `pulumi:"updateTime"` +} + +type V2ProjectSccBigQueryExportState struct { + // This must be unique within the organization. + // + // *** + BigQueryExportId pulumi.StringPtrInput + // The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + CreateTime pulumi.StringPtrInput + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset pulumi.StringPtrInput + // The description of the notification config (max of 1024 characters). + Description pulumi.StringPtrInput + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. + // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. + // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. + Filter pulumi.StringPtrInput + // location Id is provided by organization. If not provided, Use global as default. + Location pulumi.StringPtrInput + // Email address of the user who last edited the BigQuery export. + // This field is set by the server and will be ignored if provided on export creation or update. + MostRecentEditor pulumi.StringPtrInput + // The resource name of this export, in the format + // `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + // This field is provided in responses, and is ignored when provided in create requests. 
+ Name pulumi.StringPtrInput + // The service account that needs permission to create table and upload data to the BigQuery dataset. + Principal pulumi.StringPtrInput + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project pulumi.StringPtrInput + // The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + // A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + // Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + UpdateTime pulumi.StringPtrInput +} + +func (V2ProjectSccBigQueryExportState) ElementType() reflect.Type { + return reflect.TypeOf((*v2projectSccBigQueryExportState)(nil)).Elem() +} + +type v2projectSccBigQueryExportArgs struct { + // This must be unique within the organization. + // + // *** + BigQueryExportId string `pulumi:"bigQueryExportId"` + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset *string `pulumi:"dataset"` + // The description of the notification config (max of 1024 characters). + Description *string `pulumi:"description"` + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. + // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. + // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. + Filter *string `pulumi:"filter"` + // location Id is provided by organization. If not provided, Use global as default. + Location *string `pulumi:"location"` + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project *string `pulumi:"project"` +} + +// The set of arguments for constructing a V2ProjectSccBigQueryExport resource. +type V2ProjectSccBigQueryExportArgs struct { + // This must be unique within the organization. + // + // *** + BigQueryExportId pulumi.StringInput + // The dataset to write findings' updates to. + // Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + // BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + Dataset pulumi.StringPtrInput + // The description of the notification config (max of 1024 characters). + Description pulumi.StringPtrInput + // Expression that defines the filter to apply across create/update + // events of findings. The + // expression is a list of zero or more restrictions combined via + // logical operators AND and OR. Parentheses are supported, and OR + // has higher precedence than AND. 
+ // Restrictions have the form and may have + // a - character in front of them to indicate negation. The fields + // map to those defined in the corresponding resource. + // The supported operators are: + // * = for all value types. + // * > , <, >=, <= for integer values. + // * :, meaning substring matching, for strings. + // The supported value types are: + // * string literals in quotes. + // * integer literals without quotes. + // * boolean literals true and false without quotes. + // See + // [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + // for information on how to write a filter. + Filter pulumi.StringPtrInput + // location Id is provided by organization. If not provided, Use global as default. + Location pulumi.StringPtrInput + // The ID of the project in which the resource belongs. + // If it is not provided, the provider project is used. + Project pulumi.StringPtrInput +} + +func (V2ProjectSccBigQueryExportArgs) ElementType() reflect.Type { + return reflect.TypeOf((*v2projectSccBigQueryExportArgs)(nil)).Elem() +} + +type V2ProjectSccBigQueryExportInput interface { + pulumi.Input + + ToV2ProjectSccBigQueryExportOutput() V2ProjectSccBigQueryExportOutput + ToV2ProjectSccBigQueryExportOutputWithContext(ctx context.Context) V2ProjectSccBigQueryExportOutput +} + +func (*V2ProjectSccBigQueryExport) ElementType() reflect.Type { + return reflect.TypeOf((**V2ProjectSccBigQueryExport)(nil)).Elem() +} + +func (i *V2ProjectSccBigQueryExport) ToV2ProjectSccBigQueryExportOutput() V2ProjectSccBigQueryExportOutput { + return i.ToV2ProjectSccBigQueryExportOutputWithContext(context.Background()) +} + +func (i *V2ProjectSccBigQueryExport) ToV2ProjectSccBigQueryExportOutputWithContext(ctx context.Context) V2ProjectSccBigQueryExportOutput { + return pulumi.ToOutputWithContext(ctx, i).(V2ProjectSccBigQueryExportOutput) +} + +// V2ProjectSccBigQueryExportArrayInput is an input type that accepts V2ProjectSccBigQueryExportArray and V2ProjectSccBigQueryExportArrayOutput values. +// You can construct a concrete instance of `V2ProjectSccBigQueryExportArrayInput` via: +// +// V2ProjectSccBigQueryExportArray{ V2ProjectSccBigQueryExportArgs{...} } +type V2ProjectSccBigQueryExportArrayInput interface { + pulumi.Input + + ToV2ProjectSccBigQueryExportArrayOutput() V2ProjectSccBigQueryExportArrayOutput + ToV2ProjectSccBigQueryExportArrayOutputWithContext(context.Context) V2ProjectSccBigQueryExportArrayOutput +} + +type V2ProjectSccBigQueryExportArray []V2ProjectSccBigQueryExportInput + +func (V2ProjectSccBigQueryExportArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*V2ProjectSccBigQueryExport)(nil)).Elem() +} + +func (i V2ProjectSccBigQueryExportArray) ToV2ProjectSccBigQueryExportArrayOutput() V2ProjectSccBigQueryExportArrayOutput { + return i.ToV2ProjectSccBigQueryExportArrayOutputWithContext(context.Background()) +} + +func (i V2ProjectSccBigQueryExportArray) ToV2ProjectSccBigQueryExportArrayOutputWithContext(ctx context.Context) V2ProjectSccBigQueryExportArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(V2ProjectSccBigQueryExportArrayOutput) +} + +// V2ProjectSccBigQueryExportMapInput is an input type that accepts V2ProjectSccBigQueryExportMap and V2ProjectSccBigQueryExportMapOutput values. 
+// You can construct a concrete instance of `V2ProjectSccBigQueryExportMapInput` via: +// +// V2ProjectSccBigQueryExportMap{ "key": V2ProjectSccBigQueryExportArgs{...} } +type V2ProjectSccBigQueryExportMapInput interface { + pulumi.Input + + ToV2ProjectSccBigQueryExportMapOutput() V2ProjectSccBigQueryExportMapOutput + ToV2ProjectSccBigQueryExportMapOutputWithContext(context.Context) V2ProjectSccBigQueryExportMapOutput +} + +type V2ProjectSccBigQueryExportMap map[string]V2ProjectSccBigQueryExportInput + +func (V2ProjectSccBigQueryExportMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*V2ProjectSccBigQueryExport)(nil)).Elem() +} + +func (i V2ProjectSccBigQueryExportMap) ToV2ProjectSccBigQueryExportMapOutput() V2ProjectSccBigQueryExportMapOutput { + return i.ToV2ProjectSccBigQueryExportMapOutputWithContext(context.Background()) +} + +func (i V2ProjectSccBigQueryExportMap) ToV2ProjectSccBigQueryExportMapOutputWithContext(ctx context.Context) V2ProjectSccBigQueryExportMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(V2ProjectSccBigQueryExportMapOutput) +} + +type V2ProjectSccBigQueryExportOutput struct{ *pulumi.OutputState } + +func (V2ProjectSccBigQueryExportOutput) ElementType() reflect.Type { + return reflect.TypeOf((**V2ProjectSccBigQueryExport)(nil)).Elem() +} + +func (o V2ProjectSccBigQueryExportOutput) ToV2ProjectSccBigQueryExportOutput() V2ProjectSccBigQueryExportOutput { + return o +} + +func (o V2ProjectSccBigQueryExportOutput) ToV2ProjectSccBigQueryExportOutputWithContext(ctx context.Context) V2ProjectSccBigQueryExportOutput { + return o +} + +// This must be unique within the organization. +// +// *** +func (o V2ProjectSccBigQueryExportOutput) BigQueryExportId() pulumi.StringOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringOutput { return v.BigQueryExportId }).(pulumi.StringOutput) +} + +// The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". +func (o V2ProjectSccBigQueryExportOutput) CreateTime() pulumi.StringOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringOutput { return v.CreateTime }).(pulumi.StringOutput) +} + +// The dataset to write findings' updates to. +// Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". +// BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). +func (o V2ProjectSccBigQueryExportOutput) Dataset() pulumi.StringPtrOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringPtrOutput { return v.Dataset }).(pulumi.StringPtrOutput) +} + +// The description of the notification config (max of 1024 characters). +func (o V2ProjectSccBigQueryExportOutput) Description() pulumi.StringPtrOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringPtrOutput { return v.Description }).(pulumi.StringPtrOutput) +} + +// Expression that defines the filter to apply across create/update +// events of findings. The +// expression is a list of zero or more restrictions combined via +// logical operators AND and OR. Parentheses are supported, and OR +// has higher precedence than AND. +// Restrictions have the form and may have +// a - character in front of them to indicate negation. 
The fields +// map to those defined in the corresponding resource. +// The supported operators are: +// - = for all value types. +// - > , <, >=, <= for integer values. +// - :, meaning substring matching, for strings. +// The supported value types are: +// - string literals in quotes. +// - integer literals without quotes. +// - boolean literals true and false without quotes. +// See +// [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) +// for information on how to write a filter. +func (o V2ProjectSccBigQueryExportOutput) Filter() pulumi.StringPtrOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringPtrOutput { return v.Filter }).(pulumi.StringPtrOutput) +} + +// location Id is provided by organization. If not provided, Use global as default. +func (o V2ProjectSccBigQueryExportOutput) Location() pulumi.StringPtrOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringPtrOutput { return v.Location }).(pulumi.StringPtrOutput) +} + +// Email address of the user who last edited the BigQuery export. +// This field is set by the server and will be ignored if provided on export creation or update. +func (o V2ProjectSccBigQueryExportOutput) MostRecentEditor() pulumi.StringOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringOutput { return v.MostRecentEditor }).(pulumi.StringOutput) +} + +// The resource name of this export, in the format +// `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. +// This field is provided in responses, and is ignored when provided in create requests. +func (o V2ProjectSccBigQueryExportOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) +} + +// The service account that needs permission to create table and upload data to the BigQuery dataset. +func (o V2ProjectSccBigQueryExportOutput) Principal() pulumi.StringOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringOutput { return v.Principal }).(pulumi.StringOutput) +} + +// The ID of the project in which the resource belongs. +// If it is not provided, the provider project is used. +func (o V2ProjectSccBigQueryExportOutput) Project() pulumi.StringOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringOutput { return v.Project }).(pulumi.StringOutput) +} + +// The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. +// A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. +// Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
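// Editor's note: a small, hedged illustration of Filter strings for the grammar documented
// above; it is not generated output, and the attribute names and values are placeholder
// assumptions rather than a definitive reference.
//
//	const (
//		// the filter used in the bundled examples: equality restrictions combined with AND/NOT
//		activeUnmuted = `state="ACTIVE" AND NOT mute="MUTED"`
//		// ":" performs substring matching on string fields
//		persistenceFindings = `category:"PERSISTENCE"`
//	)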
+func (o V2ProjectSccBigQueryExportOutput) UpdateTime() pulumi.StringOutput { + return o.ApplyT(func(v *V2ProjectSccBigQueryExport) pulumi.StringOutput { return v.UpdateTime }).(pulumi.StringOutput) +} + +type V2ProjectSccBigQueryExportArrayOutput struct{ *pulumi.OutputState } + +func (V2ProjectSccBigQueryExportArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*V2ProjectSccBigQueryExport)(nil)).Elem() +} + +func (o V2ProjectSccBigQueryExportArrayOutput) ToV2ProjectSccBigQueryExportArrayOutput() V2ProjectSccBigQueryExportArrayOutput { + return o +} + +func (o V2ProjectSccBigQueryExportArrayOutput) ToV2ProjectSccBigQueryExportArrayOutputWithContext(ctx context.Context) V2ProjectSccBigQueryExportArrayOutput { + return o +} + +func (o V2ProjectSccBigQueryExportArrayOutput) Index(i pulumi.IntInput) V2ProjectSccBigQueryExportOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *V2ProjectSccBigQueryExport { + return vs[0].([]*V2ProjectSccBigQueryExport)[vs[1].(int)] + }).(V2ProjectSccBigQueryExportOutput) +} + +type V2ProjectSccBigQueryExportMapOutput struct{ *pulumi.OutputState } + +func (V2ProjectSccBigQueryExportMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*V2ProjectSccBigQueryExport)(nil)).Elem() +} + +func (o V2ProjectSccBigQueryExportMapOutput) ToV2ProjectSccBigQueryExportMapOutput() V2ProjectSccBigQueryExportMapOutput { + return o +} + +func (o V2ProjectSccBigQueryExportMapOutput) ToV2ProjectSccBigQueryExportMapOutputWithContext(ctx context.Context) V2ProjectSccBigQueryExportMapOutput { + return o +} + +func (o V2ProjectSccBigQueryExportMapOutput) MapIndex(k pulumi.StringInput) V2ProjectSccBigQueryExportOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *V2ProjectSccBigQueryExport { + return vs[0].(map[string]*V2ProjectSccBigQueryExport)[vs[1].(string)] + }).(V2ProjectSccBigQueryExportOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*V2ProjectSccBigQueryExportInput)(nil)).Elem(), &V2ProjectSccBigQueryExport{}) + pulumi.RegisterInputType(reflect.TypeOf((*V2ProjectSccBigQueryExportArrayInput)(nil)).Elem(), V2ProjectSccBigQueryExportArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*V2ProjectSccBigQueryExportMapInput)(nil)).Elem(), V2ProjectSccBigQueryExportMap{}) + pulumi.RegisterOutputType(V2ProjectSccBigQueryExportOutput{}) + pulumi.RegisterOutputType(V2ProjectSccBigQueryExportArrayOutput{}) + pulumi.RegisterOutputType(V2ProjectSccBigQueryExportMapOutput{}) +} diff --git a/sdk/java/build.gradle b/sdk/java/build.gradle index 8d741c9681..db29a2631a 100644 --- a/sdk/java/build.gradle +++ b/sdk/java/build.gradle @@ -44,7 +44,7 @@ repositories { dependencies { implementation("com.google.code.findbugs:jsr305:3.0.2") implementation("com.google.code.gson:gson:2.8.9") - implementation("com.pulumi:pulumi:0.15.0") + implementation("com.pulumi:pulumi:0.16.1") } task sourcesJar(type: Jar) { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/alloydb/Cluster.java b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/Cluster.java index 673d257a13..9461fa0be2 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/alloydb/Cluster.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/Cluster.java @@ -24,6 +24,7 @@ import com.pulumi.gcp.alloydb.outputs.ClusterRestoreBackupSource; import com.pulumi.gcp.alloydb.outputs.ClusterRestoreContinuousBackupSource; import com.pulumi.gcp.alloydb.outputs.ClusterSecondaryConfig; +import com.pulumi.gcp.alloydb.outputs.ClusterTrialMetadata; import java.lang.Boolean; import 
java.lang.String; import java.util.List; @@ -442,6 +443,7 @@ public Output databaseVersion() { * Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * */ @Export(name="deletionPolicy", refs={String.class}, tree="[0]") @@ -451,6 +453,7 @@ public Output databaseVersion() { * @return Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * */ public Output> deletionPolicy() { @@ -780,6 +783,38 @@ public Output> secondaryConfig() { public Output state() { return this.state; } + /** + * The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. + * + */ + @Export(name="subscriptionType", refs={String.class}, tree="[0]") + private Output subscriptionType; + + /** + * @return The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. + * + */ + public Output subscriptionType() { + return this.subscriptionType; + } + /** + * Contains information and all metadata related to TRIAL clusters. + * Structure is documented below. + * + */ + @Export(name="trialMetadatas", refs={List.class,ClusterTrialMetadata.class}, tree="[0,1]") + private Output> trialMetadatas; + + /** + * @return Contains information and all metadata related to TRIAL clusters. + * Structure is documented below. + * + */ + public Output> trialMetadatas() { + return this.trialMetadatas; + } /** * The system-generated UID of the resource. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/alloydb/ClusterArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/ClusterArgs.java index b4bf8d7654..3cf36bc2b2 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/alloydb/ClusterArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/ClusterArgs.java @@ -139,6 +139,7 @@ public Optional> databaseVersion() { * Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * */ @Import(name="deletionPolicy") @@ -148,6 +149,7 @@ public Optional> databaseVersion() { * @return Policy to determine if the cluster should be deleted forcefully. 
* Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * */ public Optional> deletionPolicy() { @@ -375,6 +377,23 @@ public Optional> secondaryConfig() { return Optional.ofNullable(this.secondaryConfig); } + /** + * The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. + * + */ + @Import(name="subscriptionType") + private @Nullable Output subscriptionType; + + /** + * @return The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. + * + */ + public Optional> subscriptionType() { + return Optional.ofNullable(this.subscriptionType); + } + private ClusterArgs() {} private ClusterArgs(ClusterArgs $) { @@ -398,6 +417,7 @@ private ClusterArgs(ClusterArgs $) { this.restoreBackupSource = $.restoreBackupSource; this.restoreContinuousBackupSource = $.restoreContinuousBackupSource; this.secondaryConfig = $.secondaryConfig; + this.subscriptionType = $.subscriptionType; } public static Builder builder() { @@ -566,6 +586,7 @@ public Builder databaseVersion(String databaseVersion) { * @param deletionPolicy Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * * @return builder * @@ -579,6 +600,7 @@ public Builder deletionPolicy(@Nullable Output deletionPolicy) { * @param deletionPolicy Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * * @return builder * @@ -886,6 +908,29 @@ public Builder secondaryConfig(ClusterSecondaryConfigArgs secondaryConfig) { return secondaryConfig(Output.of(secondaryConfig)); } + /** + * @param subscriptionType The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. + * + * @return builder + * + */ + public Builder subscriptionType(@Nullable Output subscriptionType) { + $.subscriptionType = subscriptionType; + return this; + } + + /** + * @param subscriptionType The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. 
+ * + * @return builder + * + */ + public Builder subscriptionType(String subscriptionType) { + return subscriptionType(Output.of(subscriptionType)); + } + public ClusterArgs build() { if ($.clusterId == null) { throw new MissingRequiredPropertyException("ClusterArgs", "clusterId"); diff --git a/sdk/java/src/main/java/com/pulumi/gcp/alloydb/inputs/ClusterState.java b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/inputs/ClusterState.java index aaa337e626..f7806e0ee8 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/alloydb/inputs/ClusterState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/inputs/ClusterState.java @@ -19,6 +19,7 @@ import com.pulumi.gcp.alloydb.inputs.ClusterRestoreBackupSourceArgs; import com.pulumi.gcp.alloydb.inputs.ClusterRestoreContinuousBackupSourceArgs; import com.pulumi.gcp.alloydb.inputs.ClusterSecondaryConfigArgs; +import com.pulumi.gcp.alloydb.inputs.ClusterTrialMetadataArgs; import java.lang.Boolean; import java.lang.String; import java.util.List; @@ -178,6 +179,7 @@ public Optional> databaseVersion() { * Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * */ @Import(name="deletionPolicy") @@ -187,6 +189,7 @@ public Optional> databaseVersion() { * @return Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * */ public Optional> deletionPolicy() { @@ -538,6 +541,40 @@ public Optional> state() { return Optional.ofNullable(this.state); } + /** + * The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. + * + */ + @Import(name="subscriptionType") + private @Nullable Output subscriptionType; + + /** + * @return The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. + * + */ + public Optional> subscriptionType() { + return Optional.ofNullable(this.subscriptionType); + } + + /** + * Contains information and all metadata related to TRIAL clusters. + * Structure is documented below. + * + */ + @Import(name="trialMetadatas") + private @Nullable Output> trialMetadatas; + + /** + * @return Contains information and all metadata related to TRIAL clusters. + * Structure is documented below. + * + */ + public Optional>> trialMetadatas() { + return Optional.ofNullable(this.trialMetadatas); + } + /** * The system-generated UID of the resource. 
* @@ -586,6 +623,8 @@ private ClusterState(ClusterState $) { this.restoreContinuousBackupSource = $.restoreContinuousBackupSource; this.secondaryConfig = $.secondaryConfig; this.state = $.state; + this.subscriptionType = $.subscriptionType; + this.trialMetadatas = $.trialMetadatas; this.uid = $.uid; } @@ -823,6 +862,7 @@ public Builder databaseVersion(String databaseVersion) { * @param deletionPolicy Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * * @return builder * @@ -836,6 +876,7 @@ public Builder deletionPolicy(@Nullable Output deletionPolicy) { * @param deletionPolicy Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE * * @return builder * @@ -1334,6 +1375,63 @@ public Builder state(String state) { return state(Output.of(state)); } + /** + * @param subscriptionType The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. + * + * @return builder + * + */ + public Builder subscriptionType(@Nullable Output subscriptionType) { + $.subscriptionType = subscriptionType; + return this; + } + + /** + * @param subscriptionType The subscrition type of cluster. + * Possible values are: `TRIAL`, `STANDARD`. + * + * @return builder + * + */ + public Builder subscriptionType(String subscriptionType) { + return subscriptionType(Output.of(subscriptionType)); + } + + /** + * @param trialMetadatas Contains information and all metadata related to TRIAL clusters. + * Structure is documented below. + * + * @return builder + * + */ + public Builder trialMetadatas(@Nullable Output> trialMetadatas) { + $.trialMetadatas = trialMetadatas; + return this; + } + + /** + * @param trialMetadatas Contains information and all metadata related to TRIAL clusters. + * Structure is documented below. + * + * @return builder + * + */ + public Builder trialMetadatas(List trialMetadatas) { + return trialMetadatas(Output.of(trialMetadatas)); + } + + /** + * @param trialMetadatas Contains information and all metadata related to TRIAL clusters. + * Structure is documented below. + * + * @return builder + * + */ + public Builder trialMetadatas(ClusterTrialMetadataArgs... trialMetadatas) { + return trialMetadatas(List.of(trialMetadatas)); + } + /** * @param uid The system-generated UID of the resource. 
* diff --git a/sdk/java/src/main/java/com/pulumi/gcp/alloydb/inputs/ClusterTrialMetadataArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/inputs/ClusterTrialMetadataArgs.java new file mode 100644 index 0000000000..5694237301 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/inputs/ClusterTrialMetadataArgs.java @@ -0,0 +1,194 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.alloydb.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ClusterTrialMetadataArgs extends com.pulumi.resources.ResourceArgs { + + public static final ClusterTrialMetadataArgs Empty = new ClusterTrialMetadataArgs(); + + /** + * End time of the trial cluster. + * + */ + @Import(name="endTime") + private @Nullable Output endTime; + + /** + * @return End time of the trial cluster. + * + */ + public Optional> endTime() { + return Optional.ofNullable(this.endTime); + } + + /** + * Grace end time of the trial cluster. + * + */ + @Import(name="graceEndTime") + private @Nullable Output graceEndTime; + + /** + * @return Grace end time of the trial cluster. + * + */ + public Optional> graceEndTime() { + return Optional.ofNullable(this.graceEndTime); + } + + /** + * Start time of the trial cluster. + * + */ + @Import(name="startTime") + private @Nullable Output startTime; + + /** + * @return Start time of the trial cluster. + * + */ + public Optional> startTime() { + return Optional.ofNullable(this.startTime); + } + + /** + * Upgrade time of the trial cluster to standard cluster. + * + */ + @Import(name="upgradeTime") + private @Nullable Output upgradeTime; + + /** + * @return Upgrade time of the trial cluster to standard cluster. + * + */ + public Optional> upgradeTime() { + return Optional.ofNullable(this.upgradeTime); + } + + private ClusterTrialMetadataArgs() {} + + private ClusterTrialMetadataArgs(ClusterTrialMetadataArgs $) { + this.endTime = $.endTime; + this.graceEndTime = $.graceEndTime; + this.startTime = $.startTime; + this.upgradeTime = $.upgradeTime; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ClusterTrialMetadataArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ClusterTrialMetadataArgs $; + + public Builder() { + $ = new ClusterTrialMetadataArgs(); + } + + public Builder(ClusterTrialMetadataArgs defaults) { + $ = new ClusterTrialMetadataArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param endTime End time of the trial cluster. + * + * @return builder + * + */ + public Builder endTime(@Nullable Output endTime) { + $.endTime = endTime; + return this; + } + + /** + * @param endTime End time of the trial cluster. + * + * @return builder + * + */ + public Builder endTime(String endTime) { + return endTime(Output.of(endTime)); + } + + /** + * @param graceEndTime Grace end time of the trial cluster. + * + * @return builder + * + */ + public Builder graceEndTime(@Nullable Output graceEndTime) { + $.graceEndTime = graceEndTime; + return this; + } + + /** + * @param graceEndTime Grace end time of the trial cluster. 
+ * + * @return builder + * + */ + public Builder graceEndTime(String graceEndTime) { + return graceEndTime(Output.of(graceEndTime)); + } + + /** + * @param startTime Start time of the trial cluster. + * + * @return builder + * + */ + public Builder startTime(@Nullable Output startTime) { + $.startTime = startTime; + return this; + } + + /** + * @param startTime Start time of the trial cluster. + * + * @return builder + * + */ + public Builder startTime(String startTime) { + return startTime(Output.of(startTime)); + } + + /** + * @param upgradeTime Upgrade time of the trial cluster to standard cluster. + * + * @return builder + * + */ + public Builder upgradeTime(@Nullable Output upgradeTime) { + $.upgradeTime = upgradeTime; + return this; + } + + /** + * @param upgradeTime Upgrade time of the trial cluster to standard cluster. + * + * @return builder + * + */ + public Builder upgradeTime(String upgradeTime) { + return upgradeTime(Output.of(upgradeTime)); + } + + public ClusterTrialMetadataArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/alloydb/outputs/ClusterTrialMetadata.java b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/outputs/ClusterTrialMetadata.java new file mode 100644 index 0000000000..d2ea8d7d5b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/alloydb/outputs/ClusterTrialMetadata.java @@ -0,0 +1,120 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.alloydb.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ClusterTrialMetadata { + /** + * @return End time of the trial cluster. + * + */ + private @Nullable String endTime; + /** + * @return Grace end time of the trial cluster. + * + */ + private @Nullable String graceEndTime; + /** + * @return Start time of the trial cluster. + * + */ + private @Nullable String startTime; + /** + * @return Upgrade time of the trial cluster to standard cluster. + * + */ + private @Nullable String upgradeTime; + + private ClusterTrialMetadata() {} + /** + * @return End time of the trial cluster. + * + */ + public Optional endTime() { + return Optional.ofNullable(this.endTime); + } + /** + * @return Grace end time of the trial cluster. + * + */ + public Optional graceEndTime() { + return Optional.ofNullable(this.graceEndTime); + } + /** + * @return Start time of the trial cluster. + * + */ + public Optional startTime() { + return Optional.ofNullable(this.startTime); + } + /** + * @return Upgrade time of the trial cluster to standard cluster. 
+ * + */ + public Optional upgradeTime() { + return Optional.ofNullable(this.upgradeTime); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ClusterTrialMetadata defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String endTime; + private @Nullable String graceEndTime; + private @Nullable String startTime; + private @Nullable String upgradeTime; + public Builder() {} + public Builder(ClusterTrialMetadata defaults) { + Objects.requireNonNull(defaults); + this.endTime = defaults.endTime; + this.graceEndTime = defaults.graceEndTime; + this.startTime = defaults.startTime; + this.upgradeTime = defaults.upgradeTime; + } + + @CustomType.Setter + public Builder endTime(@Nullable String endTime) { + + this.endTime = endTime; + return this; + } + @CustomType.Setter + public Builder graceEndTime(@Nullable String graceEndTime) { + + this.graceEndTime = graceEndTime; + return this; + } + @CustomType.Setter + public Builder startTime(@Nullable String startTime) { + + this.startTime = startTime; + return this; + } + @CustomType.Setter + public Builder upgradeTime(@Nullable String upgradeTime) { + + this.upgradeTime = upgradeTime; + return this; + } + public ClusterTrialMetadata build() { + final var _resultValue = new ClusterTrialMetadata(); + _resultValue.endTime = endTime; + _resultValue.graceEndTime = graceEndTime; + _resultValue.startTime = startTime; + _resultValue.upgradeTime = upgradeTime; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/Workload.java b/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/Workload.java index f9e260b330..d40b7f5546 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/Workload.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/Workload.java @@ -69,7 +69,7 @@ * .provisionedResourcesParent("folders/519620126891") * .resourceSettings( * WorkloadResourceSettingArgs.builder() - * .displayName("folder-display-name") + * .displayName("{{name}}") * .resourceType("CONSUMER_FOLDER") * .build(), * WorkloadResourceSettingArgs.builder() @@ -145,6 +145,66 @@ * } * * <!--End PulumiCodeChooser --> + * ### Split_billing_partner_workload + * A Split billing partner test of the assuredworkloads api + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.assuredworkloads.Workload;
+ * import com.pulumi.gcp.assuredworkloads.WorkloadArgs;
+ * import com.pulumi.gcp.assuredworkloads.inputs.WorkloadPartnerPermissionsArgs;
+ * import com.pulumi.gcp.assuredworkloads.inputs.WorkloadResourceSettingArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var primary = new Workload("primary", WorkloadArgs.builder()
+ *             .complianceRegime("ASSURED_WORKLOADS_FOR_PARTNERS")
+ *             .displayName("display")
+ *             .location("europe-west8")
+ *             .organization("123456789")
+ *             .billingAccount("billingAccounts/000000-0000000-0000000-000000")
+ *             .partner("SOVEREIGN_CONTROLS_BY_PSN")
+ *             .partnerPermissions(WorkloadPartnerPermissionsArgs.builder()
+ *                 .assuredWorkloadsMonitoring(true)
+ *                 .dataLogsViewer(true)
+ *                 .serviceAccessApprover(true)
+ *                 .build())
+ *             .partnerServicesBillingAccount("billingAccounts/01BF3F-2C6DE5-30C607")
+ *             .resourceSettings(            
+ *                 WorkloadResourceSettingArgs.builder()
+ *                     .resourceType("CONSUMER_FOLDER")
+ *                     .build(),
+ *                 WorkloadResourceSettingArgs.builder()
+ *                     .resourceType("ENCRYPTION_KEYS_PROJECT")
+ *                     .build(),
+ *                 WorkloadResourceSettingArgs.builder()
+ *                     .resourceId("ring")
+ *                     .resourceType("KEYRING")
+ *                     .build())
+ *             .violationNotificationsEnabled(true)
+ *             .labels(Map.of("label-one", "value-one"))
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> * * ## Import * @@ -182,14 +242,14 @@ public Output> billingAccount() { return Codegen.optional(this.billingAccount); } /** - * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * */ @Export(name="complianceRegime", refs={String.class}, tree="[0]") private Output complianceRegime; /** - * @return Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * @return Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * */ public Output complianceRegime() { @@ -388,14 +448,14 @@ public Output organization() { return this.organization; } /** - * Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * */ @Export(name="partner", refs={String.class}, tree="[0]") private Output partner; /** - * @return Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * @return Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * */ public Output> partner() { @@ -415,6 +475,20 @@ public Output> partner() { public Output> partnerPermissions() { return Codegen.optional(this.partnerPermissions); } + /** + * Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. 
This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + */ + @Export(name="partnerServicesBillingAccount", refs={String.class}, tree="[0]") + private Output partnerServicesBillingAccount; + + /** + * @return Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + */ + public Output> partnerServicesBillingAccount() { + return Codegen.optional(this.partnerServicesBillingAccount); + } /** * Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/WorkloadArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/WorkloadArgs.java index 8acb125f78..6971a036bd 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/WorkloadArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/WorkloadArgs.java @@ -38,14 +38,14 @@ public Optional> billingAccount() { } /** - * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * */ @Import(name="complianceRegime", required=true) private Output complianceRegime; /** - * @return Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * @return Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * */ public Output complianceRegime() { @@ -153,14 +153,14 @@ public Output organization() { } /** - * Optional. 
Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * */ @Import(name="partner") private @Nullable Output partner; /** - * @return Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * @return Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * */ public Optional> partner() { @@ -182,6 +182,21 @@ public Optional> partnerPermissions() { return Optional.ofNullable(this.partnerPermissions); } + /** + * Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + */ + @Import(name="partnerServicesBillingAccount") + private @Nullable Output partnerServicesBillingAccount; + + /** + * @return Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + */ + public Optional> partnerServicesBillingAccount() { + return Optional.ofNullable(this.partnerServicesBillingAccount); + } + /** * Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} * @@ -240,6 +255,7 @@ private WorkloadArgs(WorkloadArgs $) { this.organization = $.organization; this.partner = $.partner; this.partnerPermissions = $.partnerPermissions; + this.partnerServicesBillingAccount = $.partnerServicesBillingAccount; this.provisionedResourcesParent = $.provisionedResourcesParent; this.resourceSettings = $.resourceSettings; this.violationNotificationsEnabled = $.violationNotificationsEnabled; @@ -285,7 +301,7 @@ public Builder billingAccount(String billingAccount) { } /** - * @param complianceRegime Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * @param complianceRegime Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * * @return builder * @@ -296,7 +312,7 @@ public Builder complianceRegime(Output complianceRegime) { } /** - * @param complianceRegime Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * @param complianceRegime Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * * @return builder * @@ -442,7 +458,7 @@ public Builder organization(String organization) { } /** - * @param partner Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * @param partner Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * * @return builder * @@ -453,7 +469,7 @@ public Builder partner(@Nullable Output partner) { } /** - * @param partner Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * @param partner Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * * @return builder * @@ -483,6 +499,27 @@ public Builder partnerPermissions(WorkloadPartnerPermissionsArgs partnerPermissi return partnerPermissions(Output.of(partnerPermissions)); } + /** + * @param partnerServicesBillingAccount Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + * @return builder + * + */ + public Builder partnerServicesBillingAccount(@Nullable Output partnerServicesBillingAccount) { + $.partnerServicesBillingAccount = partnerServicesBillingAccount; + return this; + } + + /** + * @param partnerServicesBillingAccount Optional. Input only. 
Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + * @return builder + * + */ + public Builder partnerServicesBillingAccount(String partnerServicesBillingAccount) { + return partnerServicesBillingAccount(Output.of(partnerServicesBillingAccount)); + } + /** * @param provisionedResourcesParent Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/inputs/WorkloadState.java b/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/inputs/WorkloadState.java index eba788aba3..56c86713c0 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/inputs/WorkloadState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/assuredworkloads/inputs/WorkloadState.java @@ -41,14 +41,14 @@ public Optional> billingAccount() { } /** - * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * */ @Import(name="complianceRegime") private @Nullable Output complianceRegime; /** - * @return Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * @return Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * */ public Optional> complianceRegime() { @@ -261,14 +261,14 @@ public Optional> organization() { } /** - * Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * Optional. Partner regime associated with this workload. 
Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * */ @Import(name="partner") private @Nullable Output partner; /** - * @return Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * @return Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * */ public Optional> partner() { @@ -290,6 +290,21 @@ public Optional> partnerPermissions() { return Optional.ofNullable(this.partnerPermissions); } + /** + * Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + */ + @Import(name="partnerServicesBillingAccount") + private @Nullable Output partnerServicesBillingAccount; + + /** + * @return Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + */ + public Optional> partnerServicesBillingAccount() { + return Optional.ofNullable(this.partnerServicesBillingAccount); + } + /** * Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} * @@ -400,6 +415,7 @@ private WorkloadState(WorkloadState $) { this.organization = $.organization; this.partner = $.partner; this.partnerPermissions = $.partnerPermissions; + this.partnerServicesBillingAccount = $.partnerServicesBillingAccount; this.provisionedResourcesParent = $.provisionedResourcesParent; this.pulumiLabels = $.pulumiLabels; this.resourceSettings = $.resourceSettings; @@ -448,7 +464,7 @@ public Builder billingAccount(String billingAccount) { } /** - * @param complianceRegime Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * @param complianceRegime Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * * @return builder * @@ -459,7 +475,7 @@ public Builder complianceRegime(@Nullable Output complianceRegime) { } /** - * @param complianceRegime Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * @param complianceRegime Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS * * @return builder * @@ -782,7 +798,7 @@ public Builder organization(String organization) { } /** - * @param partner Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * @param partner Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * * @return builder * @@ -793,7 +809,7 @@ public Builder partner(@Nullable Output partner) { } /** - * @param partner Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * @param partner Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM * * @return builder * @@ -823,6 +839,27 @@ public Builder partnerPermissions(WorkloadPartnerPermissionsArgs partnerPermissi return partnerPermissions(Output.of(partnerPermissions)); } + /** + * @param partnerServicesBillingAccount Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + * @return builder + * + */ + public Builder partnerServicesBillingAccount(@Nullable Output partnerServicesBillingAccount) { + $.partnerServicesBillingAccount = partnerServicesBillingAccount; + return this; + } + + /** + * @param partnerServicesBillingAccount Optional. Input only. 
Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + * + * @return builder + * + */ + public Builder partnerServicesBillingAccount(String partnerServicesBillingAccount) { + return partnerServicesBillingAccount(Output.of(partnerServicesBillingAccount)); + } + /** * @param provisionedResourcesParent Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/BackupVault.java b/sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/BackupVault.java new file mode 100644 index 0000000000..8af433039f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/BackupVault.java @@ -0,0 +1,527 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.backupdisasterrecovery; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Export; +import com.pulumi.core.annotations.ResourceType; +import com.pulumi.core.internal.Codegen; +import com.pulumi.gcp.Utilities; +import com.pulumi.gcp.backupdisasterrecovery.BackupVaultArgs; +import com.pulumi.gcp.backupdisasterrecovery.inputs.BackupVaultState; +import java.lang.Boolean; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import javax.annotation.Nullable; + +/** + * ## Example Usage + * + * ### Backup Dr Backup Vault Full + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.backupdisasterrecovery.BackupVault;
+ * import com.pulumi.gcp.backupdisasterrecovery.BackupVaultArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
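 + *         // Create a backup vault in us-central1; labels and annotations are optional user metadata.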
+ *         var backup_vault_test = new BackupVault("backup-vault-test", BackupVaultArgs.builder()
+ *             .location("us-central1")
+ *             .backupVaultId("backup-vault-test")
 + *             .description("This is a second backup vault built by Pulumi.")
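 + *             // Required: the default and minimum enforced retention applied to every backup in this vault (100000s is roughly 27.8 hours).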
+ *             .backupMinimumEnforcedRetentionDuration("100000s")
+ *             .labels(Map.ofEntries(
+ *                 Map.entry("foo", "bar1"),
+ *                 Map.entry("bar", "baz1")
+ *             ))
+ *             .annotations(Map.ofEntries(
+ *                 Map.entry("annotations1", "bar1"),
+ *                 Map.entry("annotations2", "baz1")
+ *             ))
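 + *             // forceUpdate permits extending the vault's minimum enforced retention on update;
 + *             // forceDelete overrides deletion restrictions (empty datasources, an active backup plan reference);
 + *             // allowMissing makes deletion idempotent when the vault does not exist.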
 + *             .forceUpdate(true)
 + *             .forceDelete(true)
 + *             .allowMissing(true)
+ *             .build());
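 + *         // The vault's generated service account (exported as serviceAccount) must be granted
 + *         // permissions in the workload project before backups and restores can run there.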
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * + * ## Import + * + * BackupVault can be imported using any of these accepted formats: + * + * * `projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}` + * + * * `{{project}}/{{location}}/{{backup_vault_id}}` + * + * * `{{location}}/{{backup_vault_id}}` + * + * When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: + * + * ```sh + * $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{project}}/{{location}}/{{backup_vault_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{location}}/{{backup_vault_id}} + * ``` + * + */ +@ResourceType(type="gcp:backupdisasterrecovery/backupVault:BackupVault") +public class BackupVault extends com.pulumi.resources.CustomResource { + /** + * Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + */ + @Export(name="allowMissing", refs={Boolean.class}, tree="[0]") + private Output allowMissing; + + /** + * @return Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + */ + public Output> allowMissing() { + return Codegen.optional(this.allowMissing); + } + /** + * Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. + * + */ + @Export(name="annotations", refs={Map.class,String.class}, tree="[0,1,1]") + private Output> annotations; + + /** + * @return Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. + * + */ + public Output>> annotations() { + return Codegen.optional(this.annotations); + } + /** + * Output only. The number of backups in this backup vault. + * + */ + @Export(name="backupCount", refs={String.class}, tree="[0]") + private Output backupCount; + + /** + * @return Output only. The number of backups in this backup vault. + * + */ + public Output backupCount() { + return this.backupCount; + } + /** + * Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + * + */ + @Export(name="backupMinimumEnforcedRetentionDuration", refs={String.class}, tree="[0]") + private Output backupMinimumEnforcedRetentionDuration; + + /** + * @return Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + * + */ + public Output backupMinimumEnforcedRetentionDuration() { + return this.backupMinimumEnforcedRetentionDuration; + } + /** + * Required. ID of the requesting object. + * + * *** + * + */ + @Export(name="backupVaultId", refs={String.class}, tree="[0]") + private Output backupVaultId; + + /** + * @return Required. 
ID of the requesting object. + * + * *** + * + */ + public Output backupVaultId() { + return this.backupVaultId; + } + /** + * Output only. The time when the instance was created. + * + */ + @Export(name="createTime", refs={String.class}, tree="[0]") + private Output createTime; + + /** + * @return Output only. The time when the instance was created. + * + */ + public Output createTime() { + return this.createTime; + } + /** + * Output only. Set to true when there are no backups nested under this resource. + * + */ + @Export(name="deletable", refs={Boolean.class}, tree="[0]") + private Output deletable; + + /** + * @return Output only. Set to true when there are no backups nested under this resource. + * + */ + public Output deletable() { + return this.deletable; + } + /** + * Optional. The description of the BackupVault instance (2048 characters or less). + * + */ + @Export(name="description", refs={String.class}, tree="[0]") + private Output description; + + /** + * @return Optional. The description of the BackupVault instance (2048 characters or less). + * + */ + public Output> description() { + return Codegen.optional(this.description); + } + @Export(name="effectiveAnnotations", refs={Map.class,String.class}, tree="[0,1,1]") + private Output> effectiveAnnotations; + + public Output> effectiveAnnotations() { + return this.effectiveAnnotations; + } + /** + * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + * + */ + @Export(name="effectiveLabels", refs={Map.class,String.class}, tree="[0,1,1]") + private Output> effectiveLabels; + + /** + * @return All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + * + */ + public Output> effectiveLabels() { + return this.effectiveLabels; + } + /** + * Optional. Time after which the BackupVault resource is locked. + * + */ + @Export(name="effectiveTime", refs={String.class}, tree="[0]") + private Output effectiveTime; + + /** + * @return Optional. Time after which the BackupVault resource is locked. + * + */ + public Output> effectiveTime() { + return Codegen.optional(this.effectiveTime); + } + /** + * Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + * + */ + @Export(name="etag", refs={String.class}, tree="[0]") + private Output etag; + + /** + * @return Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + * + */ + public Output etag() { + return this.etag; + } + /** + * If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + * + */ + @Export(name="forceDelete", refs={Boolean.class}, tree="[0]") + private Output forceDelete; + + /** + * @return If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. 
+ * + */ + public Output> forceDelete() { + return Codegen.optional(this.forceDelete); + } + /** + * If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + * + */ + @Export(name="forceUpdate", refs={Boolean.class}, tree="[0]") + private Output forceUpdate; + + /** + * @return If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + * + */ + public Output> forceUpdate() { + return Codegen.optional(this.forceUpdate); + } + /** + * Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + */ + @Export(name="labels", refs={Map.class,String.class}, tree="[0,1,1]") + private Output> labels; + + /** + * @return Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + */ + public Output>> labels() { + return Codegen.optional(this.labels); + } + /** + * The GCP location for the backup vault. + * + */ + @Export(name="location", refs={String.class}, tree="[0]") + private Output location; + + /** + * @return The GCP location for the backup vault. + * + */ + public Output location() { + return this.location; + } + /** + * Output only. Identifier. The resource name. + * + */ + @Export(name="name", refs={String.class}, tree="[0]") + private Output name; + + /** + * @return Output only. Identifier. The resource name. + * + */ + public Output name() { + return this.name; + } + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + @Export(name="project", refs={String.class}, tree="[0]") + private Output project; + + /** + * @return The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + public Output project() { + return this.project; + } + /** + * The combination of labels configured directly on the resource + * and default labels configured on the provider. + * + */ + @Export(name="pulumiLabels", refs={Map.class,String.class}, tree="[0,1,1]") + private Output> pulumiLabels; + + /** + * @return The combination of labels configured directly on the resource + * and default labels configured on the provider. + * + */ + public Output> pulumiLabels() { + return this.pulumiLabels; + } + /** + * Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + * + */ + @Export(name="serviceAccount", refs={String.class}, tree="[0]") + private Output serviceAccount; + + /** + * @return Output only. Service account used by the BackupVault Service for this BackupVault. 
The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + * + */ + public Output serviceAccount() { + return this.serviceAccount; + } + /** + * Output only. The BackupVault resource instance state. + * Possible values: + * STATE_UNSPECIFIED + * CREATING + * ACTIVE + * DELETING + * ERROR + * + */ + @Export(name="state", refs={String.class}, tree="[0]") + private Output state; + + /** + * @return Output only. The BackupVault resource instance state. + * Possible values: + * STATE_UNSPECIFIED + * CREATING + * ACTIVE + * DELETING + * ERROR + * + */ + public Output state() { + return this.state; + } + /** + * Output only. Total size of the storage used by all backup resources. + * + */ + @Export(name="totalStoredBytes", refs={String.class}, tree="[0]") + private Output totalStoredBytes; + + /** + * @return Output only. Total size of the storage used by all backup resources. + * + */ + public Output totalStoredBytes() { + return this.totalStoredBytes; + } + /** + * Output only. Output only Immutable after resource creation until resource deletion. + * + */ + @Export(name="uid", refs={String.class}, tree="[0]") + private Output uid; + + /** + * @return Output only. Output only Immutable after resource creation until resource deletion. + * + */ + public Output uid() { + return this.uid; + } + /** + * Output only. The time when the instance was updated. + * + */ + @Export(name="updateTime", refs={String.class}, tree="[0]") + private Output updateTime; + + /** + * @return Output only. The time when the instance was updated. + * + */ + public Output updateTime() { + return this.updateTime; + } + + /** + * + * @param name The _unique_ name of the resulting resource. + */ + public BackupVault(java.lang.String name) { + this(name, BackupVaultArgs.Empty); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + */ + public BackupVault(java.lang.String name, BackupVaultArgs args) { + this(name, args, null); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + * @param options A bag of options that control this resource's behavior. + */ + public BackupVault(java.lang.String name, BackupVaultArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("gcp:backupdisasterrecovery/backupVault:BackupVault", name, makeArgs(args, options), makeResourceOptions(options, Codegen.empty()), false); + } + + private BackupVault(java.lang.String name, Output id, @Nullable BackupVaultState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("gcp:backupdisasterrecovery/backupVault:BackupVault", name, state, makeResourceOptions(options, id), false); + } + + private static BackupVaultArgs makeArgs(BackupVaultArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + if (options != null && options.getUrn().isPresent()) { + return null; + } + return args == null ? 
BackupVaultArgs.Empty : args; + } + + private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) { + var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder() + .version(Utilities.getVersion()) + .additionalSecretOutputs(List.of( + "effectiveLabels", + "pulumiLabels" + )) + .build(); + return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id); + } + + /** + * Get an existing Host resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state + * @param options Optional settings to control the behavior of the CustomResource. + */ + public static BackupVault get(java.lang.String name, Output id, @Nullable BackupVaultState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + return new BackupVault(name, id, state, options); + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/BackupVaultArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/BackupVaultArgs.java new file mode 100644 index 0000000000..9fd22f2f86 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/BackupVaultArgs.java @@ -0,0 +1,517 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.backupdisasterrecovery; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.Boolean; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class BackupVaultArgs extends com.pulumi.resources.ResourceArgs { + + public static final BackupVaultArgs Empty = new BackupVaultArgs(); + + /** + * Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + */ + @Import(name="allowMissing") + private @Nullable Output allowMissing; + + /** + * @return Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + */ + public Optional> allowMissing() { + return Optional.ofNullable(this.allowMissing); + } + + /** + * Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. + * + */ + @Import(name="annotations") + private @Nullable Output> annotations; + + /** + * @return Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. + * + */ + public Optional>> annotations() { + return Optional.ofNullable(this.annotations); + } + + /** + * Required. The default and minimum enforced retention for each backup within the backup vault. 
The enforced retention for each backup can be extended. + * + */ + @Import(name="backupMinimumEnforcedRetentionDuration", required=true) + private Output backupMinimumEnforcedRetentionDuration; + + /** + * @return Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + * + */ + public Output backupMinimumEnforcedRetentionDuration() { + return this.backupMinimumEnforcedRetentionDuration; + } + + /** + * Required. ID of the requesting object. + * + * *** + * + */ + @Import(name="backupVaultId", required=true) + private Output backupVaultId; + + /** + * @return Required. ID of the requesting object. + * + * *** + * + */ + public Output backupVaultId() { + return this.backupVaultId; + } + + /** + * Optional. The description of the BackupVault instance (2048 characters or less). + * + */ + @Import(name="description") + private @Nullable Output description; + + /** + * @return Optional. The description of the BackupVault instance (2048 characters or less). + * + */ + public Optional> description() { + return Optional.ofNullable(this.description); + } + + /** + * Optional. Time after which the BackupVault resource is locked. + * + */ + @Import(name="effectiveTime") + private @Nullable Output effectiveTime; + + /** + * @return Optional. Time after which the BackupVault resource is locked. + * + */ + public Optional> effectiveTime() { + return Optional.ofNullable(this.effectiveTime); + } + + /** + * If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + * + */ + @Import(name="forceDelete") + private @Nullable Output forceDelete; + + /** + * @return If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + * + */ + public Optional> forceDelete() { + return Optional.ofNullable(this.forceDelete); + } + + /** + * If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + * + */ + @Import(name="forceUpdate") + private @Nullable Output forceUpdate; + + /** + * @return If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + * + */ + public Optional> forceUpdate() { + return Optional.ofNullable(this.forceUpdate); + } + + /** + * Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + */ + @Import(name="labels") + private @Nullable Output> labels; + + /** + * @return Optional. 
Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + */ + public Optional>> labels() { + return Optional.ofNullable(this.labels); + } + + /** + * The GCP location for the backup vault. + * + */ + @Import(name="location", required=true) + private Output location; + + /** + * @return The GCP location for the backup vault. + * + */ + public Output location() { + return this.location; + } + + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + @Import(name="project") + private @Nullable Output project; + + /** + * @return The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + public Optional> project() { + return Optional.ofNullable(this.project); + } + + private BackupVaultArgs() {} + + private BackupVaultArgs(BackupVaultArgs $) { + this.allowMissing = $.allowMissing; + this.annotations = $.annotations; + this.backupMinimumEnforcedRetentionDuration = $.backupMinimumEnforcedRetentionDuration; + this.backupVaultId = $.backupVaultId; + this.description = $.description; + this.effectiveTime = $.effectiveTime; + this.forceDelete = $.forceDelete; + this.forceUpdate = $.forceUpdate; + this.labels = $.labels; + this.location = $.location; + this.project = $.project; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(BackupVaultArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private BackupVaultArgs $; + + public Builder() { + $ = new BackupVaultArgs(); + } + + public Builder(BackupVaultArgs defaults) { + $ = new BackupVaultArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param allowMissing Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + * @return builder + * + */ + public Builder allowMissing(@Nullable Output allowMissing) { + $.allowMissing = allowMissing; + return this; + } + + /** + * @param allowMissing Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + * @return builder + * + */ + public Builder allowMissing(Boolean allowMissing) { + return allowMissing(Output.of(allowMissing)); + } + + /** + * @param annotations Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. + * + * @return builder + * + */ + public Builder annotations(@Nullable Output> annotations) { + $.annotations = annotations; + return this; + } + + /** + * @param annotations Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. 
+ * + * @return builder + * + */ + public Builder annotations(Map annotations) { + return annotations(Output.of(annotations)); + } + + /** + * @param backupMinimumEnforcedRetentionDuration Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + * + * @return builder + * + */ + public Builder backupMinimumEnforcedRetentionDuration(Output backupMinimumEnforcedRetentionDuration) { + $.backupMinimumEnforcedRetentionDuration = backupMinimumEnforcedRetentionDuration; + return this; + } + + /** + * @param backupMinimumEnforcedRetentionDuration Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + * + * @return builder + * + */ + public Builder backupMinimumEnforcedRetentionDuration(String backupMinimumEnforcedRetentionDuration) { + return backupMinimumEnforcedRetentionDuration(Output.of(backupMinimumEnforcedRetentionDuration)); + } + + /** + * @param backupVaultId Required. ID of the requesting object. + * + * *** + * + * @return builder + * + */ + public Builder backupVaultId(Output backupVaultId) { + $.backupVaultId = backupVaultId; + return this; + } + + /** + * @param backupVaultId Required. ID of the requesting object. + * + * *** + * + * @return builder + * + */ + public Builder backupVaultId(String backupVaultId) { + return backupVaultId(Output.of(backupVaultId)); + } + + /** + * @param description Optional. The description of the BackupVault instance (2048 characters or less). + * + * @return builder + * + */ + public Builder description(@Nullable Output description) { + $.description = description; + return this; + } + + /** + * @param description Optional. The description of the BackupVault instance (2048 characters or less). + * + * @return builder + * + */ + public Builder description(String description) { + return description(Output.of(description)); + } + + /** + * @param effectiveTime Optional. Time after which the BackupVault resource is locked. + * + * @return builder + * + */ + public Builder effectiveTime(@Nullable Output effectiveTime) { + $.effectiveTime = effectiveTime; + return this; + } + + /** + * @param effectiveTime Optional. Time after which the BackupVault resource is locked. + * + * @return builder + * + */ + public Builder effectiveTime(String effectiveTime) { + return effectiveTime(Output.of(effectiveTime)); + } + + /** + * @param forceDelete If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + * + * @return builder + * + */ + public Builder forceDelete(@Nullable Output forceDelete) { + $.forceDelete = forceDelete; + return this; + } + + /** + * @param forceDelete If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + * + * @return builder + * + */ + public Builder forceDelete(Boolean forceDelete) { + return forceDelete(Output.of(forceDelete)); + } + + /** + * @param forceUpdate If set, allow update to extend the minimum enforced retention for backup vault. 
This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + * + * @return builder + * + */ + public Builder forceUpdate(@Nullable Output forceUpdate) { + $.forceUpdate = forceUpdate; + return this; + } + + /** + * @param forceUpdate If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + * + * @return builder + * + */ + public Builder forceUpdate(Boolean forceUpdate) { + return forceUpdate(Output.of(forceUpdate)); + } + + /** + * @param labels Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + * @return builder + * + */ + public Builder labels(@Nullable Output> labels) { + $.labels = labels; + return this; + } + + /** + * @param labels Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + * @return builder + * + */ + public Builder labels(Map labels) { + return labels(Output.of(labels)); + } + + /** + * @param location The GCP location for the backup vault. + * + * @return builder + * + */ + public Builder location(Output location) { + $.location = location; + return this; + } + + /** + * @param location The GCP location for the backup vault. + * + * @return builder + * + */ + public Builder location(String location) { + return location(Output.of(location)); + } + + /** + * @param project The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + * @return builder + * + */ + public Builder project(@Nullable Output project) { + $.project = project; + return this; + } + + /** + * @param project The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + * @return builder + * + */ + public Builder project(String project) { + return project(Output.of(project)); + } + + public BackupVaultArgs build() { + if ($.backupMinimumEnforcedRetentionDuration == null) { + throw new MissingRequiredPropertyException("BackupVaultArgs", "backupMinimumEnforcedRetentionDuration"); + } + if ($.backupVaultId == null) { + throw new MissingRequiredPropertyException("BackupVaultArgs", "backupVaultId"); + } + if ($.location == null) { + throw new MissingRequiredPropertyException("BackupVaultArgs", "location"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/inputs/BackupVaultState.java b/sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/inputs/BackupVaultState.java new file mode 100644 index 0000000000..f1eeb4304f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/backupdisasterrecovery/inputs/BackupVaultState.java @@ -0,0 +1,996 @@ +// *** WARNING: this file was generated by pulumi-java-gen. 
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.backupdisasterrecovery.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Boolean; +import java.lang.String; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class BackupVaultState extends com.pulumi.resources.ResourceArgs { + + public static final BackupVaultState Empty = new BackupVaultState(); + + /** + * Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + */ + @Import(name="allowMissing") + private @Nullable Output allowMissing; + + /** + * @return Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + */ + public Optional> allowMissing() { + return Optional.ofNullable(this.allowMissing); + } + + /** + * Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. + * + */ + @Import(name="annotations") + private @Nullable Output> annotations; + + /** + * @return Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. + * + */ + public Optional>> annotations() { + return Optional.ofNullable(this.annotations); + } + + /** + * Output only. The number of backups in this backup vault. + * + */ + @Import(name="backupCount") + private @Nullable Output backupCount; + + /** + * @return Output only. The number of backups in this backup vault. + * + */ + public Optional> backupCount() { + return Optional.ofNullable(this.backupCount); + } + + /** + * Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + * + */ + @Import(name="backupMinimumEnforcedRetentionDuration") + private @Nullable Output backupMinimumEnforcedRetentionDuration; + + /** + * @return Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + * + */ + public Optional> backupMinimumEnforcedRetentionDuration() { + return Optional.ofNullable(this.backupMinimumEnforcedRetentionDuration); + } + + /** + * Required. ID of the requesting object. + * + * *** + * + */ + @Import(name="backupVaultId") + private @Nullable Output backupVaultId; + + /** + * @return Required. ID of the requesting object. + * + * *** + * + */ + public Optional> backupVaultId() { + return Optional.ofNullable(this.backupVaultId); + } + + /** + * Output only. The time when the instance was created. + * + */ + @Import(name="createTime") + private @Nullable Output createTime; + + /** + * @return Output only. The time when the instance was created. + * + */ + public Optional> createTime() { + return Optional.ofNullable(this.createTime); + } + + /** + * Output only. Set to true when there are no backups nested under this resource. 
+ * + */ + @Import(name="deletable") + private @Nullable Output deletable; + + /** + * @return Output only. Set to true when there are no backups nested under this resource. + * + */ + public Optional> deletable() { + return Optional.ofNullable(this.deletable); + } + + /** + * Optional. The description of the BackupVault instance (2048 characters or less). + * + */ + @Import(name="description") + private @Nullable Output description; + + /** + * @return Optional. The description of the BackupVault instance (2048 characters or less). + * + */ + public Optional> description() { + return Optional.ofNullable(this.description); + } + + @Import(name="effectiveAnnotations") + private @Nullable Output> effectiveAnnotations; + + public Optional>> effectiveAnnotations() { + return Optional.ofNullable(this.effectiveAnnotations); + } + + /** + * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + * + */ + @Import(name="effectiveLabels") + private @Nullable Output> effectiveLabels; + + /** + * @return All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + * + */ + public Optional>> effectiveLabels() { + return Optional.ofNullable(this.effectiveLabels); + } + + /** + * Optional. Time after which the BackupVault resource is locked. + * + */ + @Import(name="effectiveTime") + private @Nullable Output effectiveTime; + + /** + * @return Optional. Time after which the BackupVault resource is locked. + * + */ + public Optional> effectiveTime() { + return Optional.ofNullable(this.effectiveTime); + } + + /** + * Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + * + */ + @Import(name="etag") + private @Nullable Output etag; + + /** + * @return Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + * + */ + public Optional> etag() { + return Optional.ofNullable(this.etag); + } + + /** + * If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + * + */ + @Import(name="forceDelete") + private @Nullable Output forceDelete; + + /** + * @return If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + * + */ + public Optional> forceDelete() { + return Optional.ofNullable(this.forceDelete); + } + + /** + * If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + * + */ + @Import(name="forceUpdate") + private @Nullable Output forceUpdate; + + /** + * @return If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. 
This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + * + */ + public Optional> forceUpdate() { + return Optional.ofNullable(this.forceUpdate); + } + + /** + * Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + */ + @Import(name="labels") + private @Nullable Output> labels; + + /** + * @return Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + */ + public Optional>> labels() { + return Optional.ofNullable(this.labels); + } + + /** + * The GCP location for the backup vault. + * + */ + @Import(name="location") + private @Nullable Output location; + + /** + * @return The GCP location for the backup vault. + * + */ + public Optional> location() { + return Optional.ofNullable(this.location); + } + + /** + * Output only. Identifier. The resource name. + * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return Output only. Identifier. The resource name. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + @Import(name="project") + private @Nullable Output project; + + /** + * @return The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + public Optional> project() { + return Optional.ofNullable(this.project); + } + + /** + * The combination of labels configured directly on the resource + * and default labels configured on the provider. + * + */ + @Import(name="pulumiLabels") + private @Nullable Output> pulumiLabels; + + /** + * @return The combination of labels configured directly on the resource + * and default labels configured on the provider. + * + */ + public Optional>> pulumiLabels() { + return Optional.ofNullable(this.pulumiLabels); + } + + /** + * Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + * + */ + @Import(name="serviceAccount") + private @Nullable Output serviceAccount; + + /** + * @return Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + * + */ + public Optional> serviceAccount() { + return Optional.ofNullable(this.serviceAccount); + } + + /** + * Output only. The BackupVault resource instance state. + * Possible values: + * STATE_UNSPECIFIED + * CREATING + * ACTIVE + * DELETING + * ERROR + * + */ + @Import(name="state") + private @Nullable Output state; + + /** + * @return Output only. The BackupVault resource instance state. + * Possible values: + * STATE_UNSPECIFIED + * CREATING + * ACTIVE + * DELETING + * ERROR + * + */ + public Optional> state() { + return Optional.ofNullable(this.state); + } + + /** + * Output only. 
Total size of the storage used by all backup resources. + * + */ + @Import(name="totalStoredBytes") + private @Nullable Output totalStoredBytes; + + /** + * @return Output only. Total size of the storage used by all backup resources. + * + */ + public Optional> totalStoredBytes() { + return Optional.ofNullable(this.totalStoredBytes); + } + + /** + * Output only. Output only Immutable after resource creation until resource deletion. + * + */ + @Import(name="uid") + private @Nullable Output uid; + + /** + * @return Output only. Output only Immutable after resource creation until resource deletion. + * + */ + public Optional> uid() { + return Optional.ofNullable(this.uid); + } + + /** + * Output only. The time when the instance was updated. + * + */ + @Import(name="updateTime") + private @Nullable Output updateTime; + + /** + * @return Output only. The time when the instance was updated. + * + */ + public Optional> updateTime() { + return Optional.ofNullable(this.updateTime); + } + + private BackupVaultState() {} + + private BackupVaultState(BackupVaultState $) { + this.allowMissing = $.allowMissing; + this.annotations = $.annotations; + this.backupCount = $.backupCount; + this.backupMinimumEnforcedRetentionDuration = $.backupMinimumEnforcedRetentionDuration; + this.backupVaultId = $.backupVaultId; + this.createTime = $.createTime; + this.deletable = $.deletable; + this.description = $.description; + this.effectiveAnnotations = $.effectiveAnnotations; + this.effectiveLabels = $.effectiveLabels; + this.effectiveTime = $.effectiveTime; + this.etag = $.etag; + this.forceDelete = $.forceDelete; + this.forceUpdate = $.forceUpdate; + this.labels = $.labels; + this.location = $.location; + this.name = $.name; + this.project = $.project; + this.pulumiLabels = $.pulumiLabels; + this.serviceAccount = $.serviceAccount; + this.state = $.state; + this.totalStoredBytes = $.totalStoredBytes; + this.uid = $.uid; + this.updateTime = $.updateTime; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(BackupVaultState defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private BackupVaultState $; + + public Builder() { + $ = new BackupVaultState(); + } + + public Builder(BackupVaultState defaults) { + $ = new BackupVaultState(Objects.requireNonNull(defaults)); + } + + /** + * @param allowMissing Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + * @return builder + * + */ + public Builder allowMissing(@Nullable Output allowMissing) { + $.allowMissing = allowMissing; + return this; + } + + /** + * @param allowMissing Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + * + * @return builder + * + */ + public Builder allowMissing(Boolean allowMissing) { + return allowMissing(Output.of(allowMissing)); + } + + /** + * @param annotations Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. + * + * @return builder + * + */ + public Builder annotations(@Nullable Output> annotations) { + $.annotations = annotations; + return this; + } + + /** + * @param annotations Optional. User annotations. 
See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effective_annotations` for all of the annotations present on the resource. + * + * @return builder + * + */ + public Builder annotations(Map annotations) { + return annotations(Output.of(annotations)); + } + + /** + * @param backupCount Output only. The number of backups in this backup vault. + * + * @return builder + * + */ + public Builder backupCount(@Nullable Output backupCount) { + $.backupCount = backupCount; + return this; + } + + /** + * @param backupCount Output only. The number of backups in this backup vault. + * + * @return builder + * + */ + public Builder backupCount(String backupCount) { + return backupCount(Output.of(backupCount)); + } + + /** + * @param backupMinimumEnforcedRetentionDuration Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + * + * @return builder + * + */ + public Builder backupMinimumEnforcedRetentionDuration(@Nullable Output backupMinimumEnforcedRetentionDuration) { + $.backupMinimumEnforcedRetentionDuration = backupMinimumEnforcedRetentionDuration; + return this; + } + + /** + * @param backupMinimumEnforcedRetentionDuration Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + * + * @return builder + * + */ + public Builder backupMinimumEnforcedRetentionDuration(String backupMinimumEnforcedRetentionDuration) { + return backupMinimumEnforcedRetentionDuration(Output.of(backupMinimumEnforcedRetentionDuration)); + } + + /** + * @param backupVaultId Required. ID of the requesting object. + * + * *** + * + * @return builder + * + */ + public Builder backupVaultId(@Nullable Output backupVaultId) { + $.backupVaultId = backupVaultId; + return this; + } + + /** + * @param backupVaultId Required. ID of the requesting object. + * + * *** + * + * @return builder + * + */ + public Builder backupVaultId(String backupVaultId) { + return backupVaultId(Output.of(backupVaultId)); + } + + /** + * @param createTime Output only. The time when the instance was created. + * + * @return builder + * + */ + public Builder createTime(@Nullable Output createTime) { + $.createTime = createTime; + return this; + } + + /** + * @param createTime Output only. The time when the instance was created. + * + * @return builder + * + */ + public Builder createTime(String createTime) { + return createTime(Output.of(createTime)); + } + + /** + * @param deletable Output only. Set to true when there are no backups nested under this resource. + * + * @return builder + * + */ + public Builder deletable(@Nullable Output deletable) { + $.deletable = deletable; + return this; + } + + /** + * @param deletable Output only. Set to true when there are no backups nested under this resource. + * + * @return builder + * + */ + public Builder deletable(Boolean deletable) { + return deletable(Output.of(deletable)); + } + + /** + * @param description Optional. The description of the BackupVault instance (2048 characters or less). + * + * @return builder + * + */ + public Builder description(@Nullable Output description) { + $.description = description; + return this; + } + + /** + * @param description Optional. The description of the BackupVault instance (2048 characters or less). 
+ * + * @return builder + * + */ + public Builder description(String description) { + return description(Output.of(description)); + } + + public Builder effectiveAnnotations(@Nullable Output> effectiveAnnotations) { + $.effectiveAnnotations = effectiveAnnotations; + return this; + } + + public Builder effectiveAnnotations(Map effectiveAnnotations) { + return effectiveAnnotations(Output.of(effectiveAnnotations)); + } + + /** + * @param effectiveLabels All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + * + * @return builder + * + */ + public Builder effectiveLabels(@Nullable Output> effectiveLabels) { + $.effectiveLabels = effectiveLabels; + return this; + } + + /** + * @param effectiveLabels All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + * + * @return builder + * + */ + public Builder effectiveLabels(Map effectiveLabels) { + return effectiveLabels(Output.of(effectiveLabels)); + } + + /** + * @param effectiveTime Optional. Time after which the BackupVault resource is locked. + * + * @return builder + * + */ + public Builder effectiveTime(@Nullable Output effectiveTime) { + $.effectiveTime = effectiveTime; + return this; + } + + /** + * @param effectiveTime Optional. Time after which the BackupVault resource is locked. + * + * @return builder + * + */ + public Builder effectiveTime(String effectiveTime) { + return effectiveTime(Output.of(effectiveTime)); + } + + /** + * @param etag Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + * + * @return builder + * + */ + public Builder etag(@Nullable Output etag) { + $.etag = etag; + return this; + } + + /** + * @param etag Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + * + * @return builder + * + */ + public Builder etag(String etag) { + return etag(Output.of(etag)); + } + + /** + * @param forceDelete If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + * + * @return builder + * + */ + public Builder forceDelete(@Nullable Output forceDelete) { + $.forceDelete = forceDelete; + return this; + } + + /** + * @param forceDelete If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + * + * @return builder + * + */ + public Builder forceDelete(Boolean forceDelete) { + return forceDelete(Output.of(forceDelete)); + } + + /** + * @param forceUpdate If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. 
+ * + * @return builder + * + */ + public Builder forceUpdate(@Nullable Output forceUpdate) { + $.forceUpdate = forceUpdate; + return this; + } + + /** + * @param forceUpdate If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + * + * @return builder + * + */ + public Builder forceUpdate(Boolean forceUpdate) { + return forceUpdate(Output.of(forceUpdate)); + } + + /** + * @param labels Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + * @return builder + * + */ + public Builder labels(@Nullable Output> labels) { + $.labels = labels; + return this; + } + + /** + * @param labels Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. + * + * @return builder + * + */ + public Builder labels(Map labels) { + return labels(Output.of(labels)); + } + + /** + * @param location The GCP location for the backup vault. + * + * @return builder + * + */ + public Builder location(@Nullable Output location) { + $.location = location; + return this; + } + + /** + * @param location The GCP location for the backup vault. + * + * @return builder + * + */ + public Builder location(String location) { + return location(Output.of(location)); + } + + /** + * @param name Output only. Identifier. The resource name. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name Output only. Identifier. The resource name. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + /** + * @param project The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + * @return builder + * + */ + public Builder project(@Nullable Output project) { + $.project = project; + return this; + } + + /** + * @param project The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + * @return builder + * + */ + public Builder project(String project) { + return project(Output.of(project)); + } + + /** + * @param pulumiLabels The combination of labels configured directly on the resource + * and default labels configured on the provider. + * + * @return builder + * + */ + public Builder pulumiLabels(@Nullable Output> pulumiLabels) { + $.pulumiLabels = pulumiLabels; + return this; + } + + /** + * @param pulumiLabels The combination of labels configured directly on the resource + * and default labels configured on the provider. + * + * @return builder + * + */ + public Builder pulumiLabels(Map pulumiLabels) { + return pulumiLabels(Output.of(pulumiLabels)); + } + + /** + * @param serviceAccount Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. 
+ * + * @return builder + * + */ + public Builder serviceAccount(@Nullable Output serviceAccount) { + $.serviceAccount = serviceAccount; + return this; + } + + /** + * @param serviceAccount Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + * + * @return builder + * + */ + public Builder serviceAccount(String serviceAccount) { + return serviceAccount(Output.of(serviceAccount)); + } + + /** + * @param state Output only. The BackupVault resource instance state. + * Possible values: + * STATE_UNSPECIFIED + * CREATING + * ACTIVE + * DELETING + * ERROR + * + * @return builder + * + */ + public Builder state(@Nullable Output state) { + $.state = state; + return this; + } + + /** + * @param state Output only. The BackupVault resource instance state. + * Possible values: + * STATE_UNSPECIFIED + * CREATING + * ACTIVE + * DELETING + * ERROR + * + * @return builder + * + */ + public Builder state(String state) { + return state(Output.of(state)); + } + + /** + * @param totalStoredBytes Output only. Total size of the storage used by all backup resources. + * + * @return builder + * + */ + public Builder totalStoredBytes(@Nullable Output totalStoredBytes) { + $.totalStoredBytes = totalStoredBytes; + return this; + } + + /** + * @param totalStoredBytes Output only. Total size of the storage used by all backup resources. + * + * @return builder + * + */ + public Builder totalStoredBytes(String totalStoredBytes) { + return totalStoredBytes(Output.of(totalStoredBytes)); + } + + /** + * @param uid Output only. Output only Immutable after resource creation until resource deletion. + * + * @return builder + * + */ + public Builder uid(@Nullable Output uid) { + $.uid = uid; + return this; + } + + /** + * @param uid Output only. Output only Immutable after resource creation until resource deletion. + * + * @return builder + * + */ + public Builder uid(String uid) { + return uid(Output.of(uid)); + } + + /** + * @param updateTime Output only. The time when the instance was updated. + * + * @return builder + * + */ + public Builder updateTime(@Nullable Output updateTime) { + $.updateTime = updateTime; + return this; + } + + /** + * @param updateTime Output only. The time when the instance was updated. + * + * @return builder + * + */ + public Builder updateTime(String updateTime) { + return updateTime(Output.of(updateTime)); + } + + public BackupVaultState build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigquery/DataTransferConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/DataTransferConfig.java index 961778329a..0e5669b2d5 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigquery/DataTransferConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/DataTransferConfig.java @@ -11,6 +11,7 @@ import com.pulumi.gcp.bigquery.DataTransferConfigArgs; import com.pulumi.gcp.bigquery.inputs.DataTransferConfigState; import com.pulumi.gcp.bigquery.outputs.DataTransferConfigEmailPreferences; +import com.pulumi.gcp.bigquery.outputs.DataTransferConfigEncryptionConfiguration; import com.pulumi.gcp.bigquery.outputs.DataTransferConfigScheduleOptions; import com.pulumi.gcp.bigquery.outputs.DataTransferConfigSensitiveParams; import java.lang.Boolean; @@ -101,6 +102,93 @@ * } * * <!--End PulumiCodeChooser --> + * ### Bigquerydatatransfer Config Cmek + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.organizations.OrganizationsFunctions;
+ * import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
+ * import com.pulumi.gcp.projects.IAMMember;
+ * import com.pulumi.gcp.projects.IAMMemberArgs;
+ * import com.pulumi.gcp.bigquery.Dataset;
+ * import com.pulumi.gcp.bigquery.DatasetArgs;
+ * import com.pulumi.gcp.kms.KeyRing;
+ * import com.pulumi.gcp.kms.KeyRingArgs;
+ * import com.pulumi.gcp.kms.CryptoKey;
+ * import com.pulumi.gcp.kms.CryptoKeyArgs;
+ * import com.pulumi.gcp.bigquery.DataTransferConfig;
+ * import com.pulumi.gcp.bigquery.DataTransferConfigArgs;
+ * import com.pulumi.gcp.bigquery.inputs.DataTransferConfigEncryptionConfigurationArgs;
+ * import com.pulumi.resources.CustomResourceOptions;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App }{{@code
+ *     public static void main(String[] args) }{{@code
+ *         Pulumi.run(App::stack);
+ *     }}{@code
+ * 
+ *     public static void stack(Context ctx) }{{@code
+ *         final var project = OrganizationsFunctions.getProject();
+ * 
+ *         var permissions = new IAMMember("permissions", IAMMemberArgs.builder()
+ *             .project(project.applyValue(getProjectResult -> getProjectResult.projectId()))
+ *             .role("roles/iam.serviceAccountTokenCreator")
+ *             .member(String.format("serviceAccount:service-%s}{@literal @}{@code gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com", project.applyValue(getProjectResult -> getProjectResult.number())))
+ *             .build());
+ * 
+ *         var myDataset = new Dataset("myDataset", DatasetArgs.builder()
+ *             .datasetId("example_dataset")
+ *             .friendlyName("foo")
+ *             .description("bar")
+ *             .location("asia-northeast1")
+ *             .build(), CustomResourceOptions.builder()
+ *                 .dependsOn(permissions)
+ *                 .build());
+ * 
+ *         var keyRing = new KeyRing("keyRing", KeyRingArgs.builder()
+ *             .name("example-keyring")
+ *             .location("us")
+ *             .build());
+ * 
+ *         var cryptoKey = new CryptoKey("cryptoKey", CryptoKeyArgs.builder()
+ *             .name("example-key")
+ *             .keyRing(keyRing.id())
+ *             .build());
+ * 
+ *         var queryConfigCmek = new DataTransferConfig("queryConfigCmek", DataTransferConfigArgs.builder()
+ *             .displayName("")
+ *             .location("asia-northeast1")
+ *             .dataSourceId("scheduled_query")
+ *             .schedule("first sunday of quarter 00:00")
+ *             .destinationDatasetId(myDataset.datasetId())
+ *             .params(Map.ofEntries(
+ *                 Map.entry("destination_table_name_template", "my_table"),
+ *                 Map.entry("write_disposition", "WRITE_APPEND"),
+ *                 Map.entry("query", "SELECT name FROM tabl WHERE x = 'y'")
+ *             ))
+ *             .encryptionConfiguration(DataTransferConfigEncryptionConfigurationArgs.builder()
+ *                 .kmsKeyName(cryptoKey.id())
+ *                 .build())
+ *             .build(), CustomResourceOptions.builder()
+ *                 .dependsOn(permissions)
+ *                 .build());
+ * 
+ *     }}{@code
+ * }}{@code
+ * }
+ * 
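The example above wires the key into the transfer config; the resolved CMEK settings can also be read back from the resource, since the new encryptionConfiguration output is exposed as an Optional. A minimal sketch continuing the example (the stack output name is chosen only for illustration):

    // Surface the transfer config's CMEK key as a stack output.
    // encryptionConfiguration() is optional on the resource, hence the Optional handling.
    ctx.export("cmekKmsKeyName", queryConfigCmek.encryptionConfiguration()
        .applyValue(cfg -> cfg.map(c -> c.kmsKeyName()).orElse("")));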
+ * <!--End PulumiCodeChooser --> * ### Bigquerydatatransfer Config Salesforce * * <!--Start PulumiCodeChooser --> @@ -147,9 +235,7 @@ * .params(Map.ofEntries( * Map.entry("connector.authentication.oauth.clientId", "client-id"), * Map.entry("connector.authentication.oauth.clientSecret", "client-secret"), - * Map.entry("connector.authentication.username", "username"), - * Map.entry("connector.authentication.password", "password"), - * Map.entry("connector.authentication.securityToken", "security-token"), + * Map.entry("connector.authentication.oauth.myDomain", "MyDomainName"), * Map.entry("assets", "[\"asset-a\",\"asset-b\"]") * )) * .build()); @@ -271,6 +357,22 @@ public Output displayName() { public Output> emailPreferences() { return Codegen.optional(this.emailPreferences); } + /** + * Represents the encryption configuration for a transfer. + * Structure is documented below. + * + */ + @Export(name="encryptionConfiguration", refs={DataTransferConfigEncryptionConfiguration.class}, tree="[0]") + private Output encryptionConfiguration; + + /** + * @return Represents the encryption configuration for a transfer. + * Structure is documented below. + * + */ + public Output> encryptionConfiguration() { + return Codegen.optional(this.encryptionConfiguration); + } /** * The geographic location where the transfer config should reside. * Examples: US, EU, asia-northeast1. The default value is US. diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigquery/DataTransferConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/DataTransferConfigArgs.java index fcde8beced..2ca5fb5496 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigquery/DataTransferConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/DataTransferConfigArgs.java @@ -7,6 +7,7 @@ import com.pulumi.core.annotations.Import; import com.pulumi.exceptions.MissingRequiredPropertyException; import com.pulumi.gcp.bigquery.inputs.DataTransferConfigEmailPreferencesArgs; +import com.pulumi.gcp.bigquery.inputs.DataTransferConfigEncryptionConfigurationArgs; import com.pulumi.gcp.bigquery.inputs.DataTransferConfigScheduleOptionsArgs; import com.pulumi.gcp.bigquery.inputs.DataTransferConfigSensitiveParamsArgs; import java.lang.Boolean; @@ -124,6 +125,23 @@ public Optional> emailPreferences return Optional.ofNullable(this.emailPreferences); } + /** + * Represents the encryption configuration for a transfer. + * Structure is documented below. + * + */ + @Import(name="encryptionConfiguration") + private @Nullable Output encryptionConfiguration; + + /** + * @return Represents the encryption configuration for a transfer. + * Structure is documented below. + * + */ + public Optional> encryptionConfiguration() { + return Optional.ofNullable(this.encryptionConfiguration); + } + /** * The geographic location where the transfer config should reside. * Examples: US, EU, asia-northeast1. The default value is US. @@ -303,6 +321,7 @@ private DataTransferConfigArgs(DataTransferConfigArgs $) { this.disabled = $.disabled; this.displayName = $.displayName; this.emailPreferences = $.emailPreferences; + this.encryptionConfiguration = $.encryptionConfiguration; this.location = $.location; this.notificationPubsubTopic = $.notificationPubsubTopic; this.params = $.params; @@ -469,6 +488,29 @@ public Builder emailPreferences(DataTransferConfigEmailPreferencesArgs emailPref return emailPreferences(Output.of(emailPreferences)); } + /** + * @param encryptionConfiguration Represents the encryption configuration for a transfer. 
+ * Structure is documented below. + * + * @return builder + * + */ + public Builder encryptionConfiguration(@Nullable Output encryptionConfiguration) { + $.encryptionConfiguration = encryptionConfiguration; + return this; + } + + /** + * @param encryptionConfiguration Represents the encryption configuration for a transfer. + * Structure is documented below. + * + * @return builder + * + */ + public Builder encryptionConfiguration(DataTransferConfigEncryptionConfigurationArgs encryptionConfiguration) { + return encryptionConfiguration(Output.of(encryptionConfiguration)); + } + /** * @param location The geographic location where the transfer config should reside. * Examples: US, EU, asia-northeast1. The default value is US. diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigquery/inputs/DataTransferConfigEncryptionConfigurationArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/inputs/DataTransferConfigEncryptionConfigurationArgs.java new file mode 100644 index 0000000000..0eb4aaf704 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/inputs/DataTransferConfigEncryptionConfigurationArgs.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.bigquery.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class DataTransferConfigEncryptionConfigurationArgs extends com.pulumi.resources.ResourceArgs { + + public static final DataTransferConfigEncryptionConfigurationArgs Empty = new DataTransferConfigEncryptionConfigurationArgs(); + + /** + * The name of the KMS key used for encrypting BigQuery data. + * + */ + @Import(name="kmsKeyName", required=true) + private Output kmsKeyName; + + /** + * @return The name of the KMS key used for encrypting BigQuery data. + * + */ + public Output kmsKeyName() { + return this.kmsKeyName; + } + + private DataTransferConfigEncryptionConfigurationArgs() {} + + private DataTransferConfigEncryptionConfigurationArgs(DataTransferConfigEncryptionConfigurationArgs $) { + this.kmsKeyName = $.kmsKeyName; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(DataTransferConfigEncryptionConfigurationArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private DataTransferConfigEncryptionConfigurationArgs $; + + public Builder() { + $ = new DataTransferConfigEncryptionConfigurationArgs(); + } + + public Builder(DataTransferConfigEncryptionConfigurationArgs defaults) { + $ = new DataTransferConfigEncryptionConfigurationArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param kmsKeyName The name of the KMS key used for encrypting BigQuery data. + * + * @return builder + * + */ + public Builder kmsKeyName(Output kmsKeyName) { + $.kmsKeyName = kmsKeyName; + return this; + } + + /** + * @param kmsKeyName The name of the KMS key used for encrypting BigQuery data. 
+ * + * @return builder + * + */ + public Builder kmsKeyName(String kmsKeyName) { + return kmsKeyName(Output.of(kmsKeyName)); + } + + public DataTransferConfigEncryptionConfigurationArgs build() { + if ($.kmsKeyName == null) { + throw new MissingRequiredPropertyException("DataTransferConfigEncryptionConfigurationArgs", "kmsKeyName"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigquery/inputs/DataTransferConfigState.java b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/inputs/DataTransferConfigState.java index 90148d5008..94055e36ed 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigquery/inputs/DataTransferConfigState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/inputs/DataTransferConfigState.java @@ -6,6 +6,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.gcp.bigquery.inputs.DataTransferConfigEmailPreferencesArgs; +import com.pulumi.gcp.bigquery.inputs.DataTransferConfigEncryptionConfigurationArgs; import com.pulumi.gcp.bigquery.inputs.DataTransferConfigScheduleOptionsArgs; import com.pulumi.gcp.bigquery.inputs.DataTransferConfigSensitiveParamsArgs; import java.lang.Boolean; @@ -123,6 +124,23 @@ public Optional> emailPreferences return Optional.ofNullable(this.emailPreferences); } + /** + * Represents the encryption configuration for a transfer. + * Structure is documented below. + * + */ + @Import(name="encryptionConfiguration") + private @Nullable Output encryptionConfiguration; + + /** + * @return Represents the encryption configuration for a transfer. + * Structure is documented below. + * + */ + public Optional> encryptionConfiguration() { + return Optional.ofNullable(this.encryptionConfiguration); + } + /** * The geographic location where the transfer config should reside. * Examples: US, EU, asia-northeast1. The default value is US. @@ -325,6 +343,7 @@ private DataTransferConfigState(DataTransferConfigState $) { this.disabled = $.disabled; this.displayName = $.displayName; this.emailPreferences = $.emailPreferences; + this.encryptionConfiguration = $.encryptionConfiguration; this.location = $.location; this.name = $.name; this.notificationPubsubTopic = $.notificationPubsubTopic; @@ -492,6 +511,29 @@ public Builder emailPreferences(DataTransferConfigEmailPreferencesArgs emailPref return emailPreferences(Output.of(emailPreferences)); } + /** + * @param encryptionConfiguration Represents the encryption configuration for a transfer. + * Structure is documented below. + * + * @return builder + * + */ + public Builder encryptionConfiguration(@Nullable Output encryptionConfiguration) { + $.encryptionConfiguration = encryptionConfiguration; + return this; + } + + /** + * @param encryptionConfiguration Represents the encryption configuration for a transfer. + * Structure is documented below. + * + * @return builder + * + */ + public Builder encryptionConfiguration(DataTransferConfigEncryptionConfigurationArgs encryptionConfiguration) { + return encryptionConfiguration(Output.of(encryptionConfiguration)); + } + /** * @param location The geographic location where the transfer config should reside. * Examples: US, EU, asia-northeast1. The default value is US. 
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigquery/outputs/DataTransferConfigEncryptionConfiguration.java b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/outputs/DataTransferConfigEncryptionConfiguration.java new file mode 100644 index 0000000000..2e4992f7f2 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigquery/outputs/DataTransferConfigEncryptionConfiguration.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.bigquery.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class DataTransferConfigEncryptionConfiguration { + /** + * @return The name of the KMS key used for encrypting BigQuery data. + * + */ + private String kmsKeyName; + + private DataTransferConfigEncryptionConfiguration() {} + /** + * @return The name of the KMS key used for encrypting BigQuery data. + * + */ + public String kmsKeyName() { + return this.kmsKeyName; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(DataTransferConfigEncryptionConfiguration defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String kmsKeyName; + public Builder() {} + public Builder(DataTransferConfigEncryptionConfiguration defaults) { + Objects.requireNonNull(defaults); + this.kmsKeyName = defaults.kmsKeyName; + } + + @CustomType.Setter + public Builder kmsKeyName(String kmsKeyName) { + if (kmsKeyName == null) { + throw new MissingRequiredPropertyException("DataTransferConfigEncryptionConfiguration", "kmsKeyName"); + } + this.kmsKeyName = kmsKeyName; + return this; + } + public DataTransferConfigEncryptionConfiguration build() { + final var _resultValue = new DataTransferConfigEncryptionConfiguration(); + _resultValue.kmsKeyName = kmsKeyName; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/DataExchange.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/DataExchange.java index ca36e79f91..ca95717d78 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/DataExchange.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/DataExchange.java @@ -10,6 +10,7 @@ import com.pulumi.gcp.Utilities; import com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs; import com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeState; +import com.pulumi.gcp.bigqueryanalyticshub.outputs.DataExchangeSharingEnvironmentConfig; import java.lang.Integer; import java.lang.String; import java.util.Optional; @@ -63,6 +64,48 @@ * } * * <!--End PulumiCodeChooser --> + * ### Bigquery Analyticshub Data Exchange Dcr + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.bigqueryanalyticshub.DataExchange;
+ * import com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;
+ * import com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigArgs;
+ * import com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var dataExchange = new DataExchange("dataExchange", DataExchangeArgs.builder()
+ *             .location("US")
+ *             .dataExchangeId("dcr_data_exchange")
+ *             .displayName("dcr_data_exchange")
+ *             .description("example dcr data exchange")
+ *             .sharingEnvironmentConfig(DataExchangeSharingEnvironmentConfigArgs.builder()
+ *                 .dcrExchangeConfig(DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.builder().build())
+ *                 .build())
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
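The sharingEnvironmentConfig block added in this upgrade also accepts the defaultExchangeConfig variant. Per the field documentation it is only required for data clean room exchanges, but it can be set explicitly for a regular exchange as well. A minimal sketch (names and IDs are illustrative; DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs must be imported in addition to the imports shown above):

    // Explicitly request the default Analytics Hub sharing environment instead of a DCR.
    var defaultExchange = new DataExchange("defaultExchange", DataExchangeArgs.builder()
        .location("US")
        .dataExchangeId("default_data_exchange")
        .displayName("default_data_exchange")
        .description("example default data exchange")
        .sharingEnvironmentConfig(DataExchangeSharingEnvironmentConfigArgs.builder()
            .defaultExchangeConfig(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs.builder()
                .build())
            .build())
        .build());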
+ * <!--End PulumiCodeChooser --> * * ## Import * @@ -245,6 +288,24 @@ public Output> primaryContact() { public Output project() { return this.project; } + /** + * Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + @Export(name="sharingEnvironmentConfig", refs={DataExchangeSharingEnvironmentConfig.class}, tree="[0]") + private Output sharingEnvironmentConfig; + + /** + * @return Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + public Output sharingEnvironmentConfig() { + return this.sharingEnvironmentConfig; + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/DataExchangeArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/DataExchangeArgs.java index 255b5c6bd3..c4852b1949 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/DataExchangeArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/DataExchangeArgs.java @@ -6,6 +6,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigArgs; import java.lang.String; import java.util.Objects; import java.util.Optional; @@ -142,6 +143,25 @@ public Optional> project() { return Optional.ofNullable(this.project); } + /** + * Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + @Import(name="sharingEnvironmentConfig") + private @Nullable Output sharingEnvironmentConfig; + + /** + * @return Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + public Optional> sharingEnvironmentConfig() { + return Optional.ofNullable(this.sharingEnvironmentConfig); + } + private DataExchangeArgs() {} private DataExchangeArgs(DataExchangeArgs $) { @@ -153,6 +173,7 @@ private DataExchangeArgs(DataExchangeArgs $) { this.location = $.location; this.primaryContact = $.primaryContact; this.project = $.project; + this.sharingEnvironmentConfig = $.sharingEnvironmentConfig; } public static Builder builder() { @@ -347,6 +368,31 @@ public Builder project(String project) { return project(Output.of(project)); } + /** + * @param sharingEnvironmentConfig Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + * + * @return builder + * + */ + public Builder sharingEnvironmentConfig(@Nullable Output sharingEnvironmentConfig) { + $.sharingEnvironmentConfig = sharingEnvironmentConfig; + return this; + } + + /** + * @param sharingEnvironmentConfig Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. 
+ * + * @return builder + * + */ + public Builder sharingEnvironmentConfig(DataExchangeSharingEnvironmentConfigArgs sharingEnvironmentConfig) { + return sharingEnvironmentConfig(Output.of(sharingEnvironmentConfig)); + } + public DataExchangeArgs build() { if ($.dataExchangeId == null) { throw new MissingRequiredPropertyException("DataExchangeArgs", "dataExchangeId"); diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/Listing.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/Listing.java index 90491eb9bb..c9e889b304 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/Listing.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/Listing.java @@ -155,6 +155,105 @@ * } * * <!--End PulumiCodeChooser --> + * ### Bigquery Analyticshub Listing Dcr + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.bigqueryanalyticshub.DataExchange;
+ * import com.pulumi.gcp.bigqueryanalyticshub.DataExchangeArgs;
+ * import com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigArgs;
+ * import com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs;
+ * import com.pulumi.gcp.bigquery.Dataset;
+ * import com.pulumi.gcp.bigquery.DatasetArgs;
+ * import com.pulumi.gcp.bigquery.Table;
+ * import com.pulumi.gcp.bigquery.TableArgs;
+ * import com.pulumi.gcp.bigqueryanalyticshub.Listing;
+ * import com.pulumi.gcp.bigqueryanalyticshub.ListingArgs;
+ * import com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingBigqueryDatasetArgs;
+ * import com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingBigqueryDatasetSelectedResourceArgs;
+ * import com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingRestrictedExportConfigArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var listing = new DataExchange("listing", DataExchangeArgs.builder()
+ *             .location("US")
+ *             .dataExchangeId("dcr_data_exchange")
+ *             .displayName("dcr_data_exchange")
+ *             .description("example dcr data exchange")
+ *             .sharingEnvironmentConfig(DataExchangeSharingEnvironmentConfigArgs.builder()
+ *                 .dcrExchangeConfig(DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.builder().build())
+ *                 .build())
+ *             .build());
+ * 
+ *         var listingDataset = new Dataset("listingDataset", DatasetArgs.builder()
+ *             .datasetId("dcr_listing")
+ *             .friendlyName("dcr_listing")
+ *             .description("example dcr data exchange")
+ *             .location("US")
+ *             .build());
+ * 
+ *         var listingTable = new Table("listingTable", TableArgs.builder()
+ *             .deletionProtection(false)
+ *             .tableId("dcr_listing")
+ *             .datasetId(listingDataset.datasetId())
+ *             .schema("""
+ * [
+ *   {
+ *     "name": "name",
+ *     "type": "STRING",
+ *     "mode": "NULLABLE"
+ *   },
+ *   {
+ *     "name": "post_abbr",
+ *     "type": "STRING",
+ *     "mode": "NULLABLE"
+ *   },
+ *   {
+ *     "name": "date",
+ *     "type": "DATE",
+ *     "mode": "NULLABLE"
+ *   }
+ * ]
+ *             """)
+ *             .build());
+ * 
+ *         var listingListing = new Listing("listingListing", ListingArgs.builder()
+ *             .location("US")
+ *             .dataExchangeId(listing.dataExchangeId())
+ *             .listingId("dcr_listing")
+ *             .displayName("dcr_listing")
+ *             .description("example dcr data exchange")
+ *             .bigqueryDataset(ListingBigqueryDatasetArgs.builder()
+ *                 .dataset(listingDataset.id())
+ *                 .selectedResources(ListingBigqueryDatasetSelectedResourceArgs.builder()
+ *                     .table(listingTable.id())
+ *                     .build())
+ *                 .build())
+ *             .restrictedExportConfig(ListingRestrictedExportConfigArgs.builder()
+ *                 .enabled(true)
+ *                 .build())
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
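selectedResources is a repeated field (the generated builder exposes a varargs overload), so one DCR listing can share several tables from the source dataset. A minimal sketch reusing the resources above and assuming an additional gcp.bigquery.Table named secondTable (the listing IDs are illustrative):

    // Share two tables from the same dataset through a single DCR listing.
    var multiTableListing = new Listing("multiTableListing", ListingArgs.builder()
        .location("US")
        .dataExchangeId(listing.dataExchangeId())
        .listingId("dcr_listing_multi")
        .displayName("dcr_listing_multi")
        .description("example dcr listing sharing two tables")
        .bigqueryDataset(ListingBigqueryDatasetArgs.builder()
            .dataset(listingDataset.id())
            .selectedResources(
                ListingBigqueryDatasetSelectedResourceArgs.builder()
                    .table(listingTable.id())
                    .build(),
                ListingBigqueryDatasetSelectedResourceArgs.builder()
                    .table(secondTable.id()) // secondTable: an assumed additional gcp.bigquery.Table
                    .build())
            .build())
        .restrictedExportConfig(ListingRestrictedExportConfigArgs.builder()
            .enabled(true)
            .build())
        .build());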
+ * <!--End PulumiCodeChooser --> * * ## Import * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigArgs.java new file mode 100644 index 0000000000..3fc73036bd --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigArgs.java @@ -0,0 +1,121 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.bigqueryanalyticshub.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs; +import com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class DataExchangeSharingEnvironmentConfigArgs extends com.pulumi.resources.ResourceArgs { + + public static final DataExchangeSharingEnvironmentConfigArgs Empty = new DataExchangeSharingEnvironmentConfigArgs(); + + /** + * Data Clean Room (DCR), used for privacy-safe and secured data sharing. + * + */ + @Import(name="dcrExchangeConfig") + private @Nullable Output dcrExchangeConfig; + + /** + * @return Data Clean Room (DCR), used for privacy-safe and secured data sharing. + * + */ + public Optional> dcrExchangeConfig() { + return Optional.ofNullable(this.dcrExchangeConfig); + } + + /** + * Default Analytics Hub data exchange, used for secured data sharing. + * + */ + @Import(name="defaultExchangeConfig") + private @Nullable Output defaultExchangeConfig; + + /** + * @return Default Analytics Hub data exchange, used for secured data sharing. + * + */ + public Optional> defaultExchangeConfig() { + return Optional.ofNullable(this.defaultExchangeConfig); + } + + private DataExchangeSharingEnvironmentConfigArgs() {} + + private DataExchangeSharingEnvironmentConfigArgs(DataExchangeSharingEnvironmentConfigArgs $) { + this.dcrExchangeConfig = $.dcrExchangeConfig; + this.defaultExchangeConfig = $.defaultExchangeConfig; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(DataExchangeSharingEnvironmentConfigArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private DataExchangeSharingEnvironmentConfigArgs $; + + public Builder() { + $ = new DataExchangeSharingEnvironmentConfigArgs(); + } + + public Builder(DataExchangeSharingEnvironmentConfigArgs defaults) { + $ = new DataExchangeSharingEnvironmentConfigArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param dcrExchangeConfig Data Clean Room (DCR), used for privacy-safe and secured data sharing. + * + * @return builder + * + */ + public Builder dcrExchangeConfig(@Nullable Output dcrExchangeConfig) { + $.dcrExchangeConfig = dcrExchangeConfig; + return this; + } + + /** + * @param dcrExchangeConfig Data Clean Room (DCR), used for privacy-safe and secured data sharing. 
+ * + * @return builder + * + */ + public Builder dcrExchangeConfig(DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs dcrExchangeConfig) { + return dcrExchangeConfig(Output.of(dcrExchangeConfig)); + } + + /** + * @param defaultExchangeConfig Default Analytics Hub data exchange, used for secured data sharing. + * + * @return builder + * + */ + public Builder defaultExchangeConfig(@Nullable Output defaultExchangeConfig) { + $.defaultExchangeConfig = defaultExchangeConfig; + return this; + } + + /** + * @param defaultExchangeConfig Default Analytics Hub data exchange, used for secured data sharing. + * + * @return builder + * + */ + public Builder defaultExchangeConfig(DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs defaultExchangeConfig) { + return defaultExchangeConfig(Output.of(defaultExchangeConfig)); + } + + public DataExchangeSharingEnvironmentConfigArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.java new file mode 100644 index 0000000000..8acb7db7a4 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs.java @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.bigqueryanalyticshub.inputs; + + + + +public final class DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs extends com.pulumi.resources.ResourceArgs { + + public static final DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs Empty = new DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs(); + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + private DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs $; + + public Builder() { + $ = new DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs(); + } + public DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs.java new file mode 100644 index 0000000000..13cd2f241b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs.java @@ -0,0 +1,28 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.bigqueryanalyticshub.inputs; + + + + +public final class DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs extends com.pulumi.resources.ResourceArgs { + + public static final DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs Empty = new DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs(); + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + private DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs $; + + public Builder() { + $ = new DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs(); + } + public DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeState.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeState.java index e0e182ed48..80e783c25f 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/DataExchangeState.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.bigqueryanalyticshub.inputs.DataExchangeSharingEnvironmentConfigArgs; import java.lang.Integer; import java.lang.String; import java.util.Objects; @@ -174,6 +175,25 @@ public Optional> project() { return Optional.ofNullable(this.project); } + /** + * Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + @Import(name="sharingEnvironmentConfig") + private @Nullable Output sharingEnvironmentConfig; + + /** + * @return Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + public Optional> sharingEnvironmentConfig() { + return Optional.ofNullable(this.sharingEnvironmentConfig); + } + private DataExchangeState() {} private DataExchangeState(DataExchangeState $) { @@ -187,6 +207,7 @@ private DataExchangeState(DataExchangeState $) { this.name = $.name; this.primaryContact = $.primaryContact; this.project = $.project; + this.sharingEnvironmentConfig = $.sharingEnvironmentConfig; } public static Builder builder() { @@ -425,6 +446,31 @@ public Builder project(String project) { return project(Output.of(project)); } + /** + * @param sharingEnvironmentConfig Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + * + * @return builder + * + */ + public Builder sharingEnvironmentConfig(@Nullable Output sharingEnvironmentConfig) { + $.sharingEnvironmentConfig = sharingEnvironmentConfig; + return this; + } + + /** + * @param sharingEnvironmentConfig Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. 
+ * + * @return builder + * + */ + public Builder sharingEnvironmentConfig(DataExchangeSharingEnvironmentConfigArgs sharingEnvironmentConfig) { + return sharingEnvironmentConfig(Output.of(sharingEnvironmentConfig)); + } + public DataExchangeState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingBigqueryDatasetArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingBigqueryDatasetArgs.java index 3b3dc78893..ee27cdee3d 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingBigqueryDatasetArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingBigqueryDatasetArgs.java @@ -6,8 +6,12 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.bigqueryanalyticshub.inputs.ListingBigqueryDatasetSelectedResourceArgs; import java.lang.String; +import java.util.List; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; public final class ListingBigqueryDatasetArgs extends com.pulumi.resources.ResourceArgs { @@ -17,8 +21,6 @@ public final class ListingBigqueryDatasetArgs extends com.pulumi.resources.Resou /** * Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 * - * *** - * */ @Import(name="dataset", required=true) private Output dataset; @@ -26,17 +28,33 @@ public final class ListingBigqueryDatasetArgs extends com.pulumi.resources.Resou /** * @return Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 * - * *** - * */ public Output dataset() { return this.dataset; } + /** + * Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + @Import(name="selectedResources") + private @Nullable Output> selectedResources; + + /** + * @return Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + public Optional>> selectedResources() { + return Optional.ofNullable(this.selectedResources); + } + private ListingBigqueryDatasetArgs() {} private ListingBigqueryDatasetArgs(ListingBigqueryDatasetArgs $) { this.dataset = $.dataset; + this.selectedResources = $.selectedResources; } public static Builder builder() { @@ -60,8 +78,6 @@ public Builder(ListingBigqueryDatasetArgs defaults) { /** * @param dataset Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 * - * *** - * * @return builder * */ @@ -73,8 +89,6 @@ public Builder dataset(Output dataset) { /** * @param dataset Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 * - * *** - * * @return builder * */ @@ -82,6 +96,40 @@ public Builder dataset(String dataset) { return dataset(Output.of(dataset)); } + /** + * @param selectedResources Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + * Structure is documented below. + * + * @return builder + * + */ + public Builder selectedResources(@Nullable Output> selectedResources) { + $.selectedResources = selectedResources; + return this; + } + + /** + * @param selectedResources Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + * Structure is documented below. 
+ * + * @return builder + * + */ + public Builder selectedResources(List selectedResources) { + return selectedResources(Output.of(selectedResources)); + } + + /** + * @param selectedResources Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + * Structure is documented below. + * + * @return builder + * + */ + public Builder selectedResources(ListingBigqueryDatasetSelectedResourceArgs... selectedResources) { + return selectedResources(List.of(selectedResources)); + } + public ListingBigqueryDatasetArgs build() { if ($.dataset == null) { throw new MissingRequiredPropertyException("ListingBigqueryDatasetArgs", "dataset"); diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingBigqueryDatasetSelectedResourceArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingBigqueryDatasetSelectedResourceArgs.java new file mode 100644 index 0000000000..e330cdcea2 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingBigqueryDatasetSelectedResourceArgs.java @@ -0,0 +1,91 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.bigqueryanalyticshub.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ListingBigqueryDatasetSelectedResourceArgs extends com.pulumi.resources.ResourceArgs { + + public static final ListingBigqueryDatasetSelectedResourceArgs Empty = new ListingBigqueryDatasetSelectedResourceArgs(); + + /** + * Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + * + * *** + * + */ + @Import(name="table") + private @Nullable Output table; + + /** + * @return Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + * + * *** + * + */ + public Optional> table() { + return Optional.ofNullable(this.table); + } + + private ListingBigqueryDatasetSelectedResourceArgs() {} + + private ListingBigqueryDatasetSelectedResourceArgs(ListingBigqueryDatasetSelectedResourceArgs $) { + this.table = $.table; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ListingBigqueryDatasetSelectedResourceArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ListingBigqueryDatasetSelectedResourceArgs $; + + public Builder() { + $ = new ListingBigqueryDatasetSelectedResourceArgs(); + } + + public Builder(ListingBigqueryDatasetSelectedResourceArgs defaults) { + $ = new ListingBigqueryDatasetSelectedResourceArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param table Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + * + * *** + * + * @return builder + * + */ + public Builder table(@Nullable Output table) { + $.table = table; + return this; + } + + /** + * @param table Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + * + * *** + * + * @return builder + * + */ + public Builder table(String 
table) { + return table(Output.of(table)); + } + + public ListingBigqueryDatasetSelectedResourceArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingRestrictedExportConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingRestrictedExportConfigArgs.java index b374cb2b0d..5adaf3f18e 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingRestrictedExportConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/inputs/ListingRestrictedExportConfigArgs.java @@ -30,6 +30,23 @@ public Optional> enabled() { return Optional.ofNullable(this.enabled); } + /** + * (Output) + * If true, restrict direct table access(read api/tabledata.list) on linked table. + * + */ + @Import(name="restrictDirectTableAccess") + private @Nullable Output restrictDirectTableAccess; + + /** + * @return (Output) + * If true, restrict direct table access(read api/tabledata.list) on linked table. + * + */ + public Optional> restrictDirectTableAccess() { + return Optional.ofNullable(this.restrictDirectTableAccess); + } + /** * If true, restrict export of query result derived from restricted linked dataset table. * @@ -49,6 +66,7 @@ private ListingRestrictedExportConfigArgs() {} private ListingRestrictedExportConfigArgs(ListingRestrictedExportConfigArgs $) { this.enabled = $.enabled; + this.restrictDirectTableAccess = $.restrictDirectTableAccess; this.restrictQueryResult = $.restrictQueryResult; } @@ -91,6 +109,29 @@ public Builder enabled(Boolean enabled) { return enabled(Output.of(enabled)); } + /** + * @param restrictDirectTableAccess (Output) + * If true, restrict direct table access(read api/tabledata.list) on linked table. + * + * @return builder + * + */ + public Builder restrictDirectTableAccess(@Nullable Output restrictDirectTableAccess) { + $.restrictDirectTableAccess = restrictDirectTableAccess; + return this; + } + + /** + * @param restrictDirectTableAccess (Output) + * If true, restrict direct table access(read api/tabledata.list) on linked table. + * + * @return builder + * + */ + public Builder restrictDirectTableAccess(Boolean restrictDirectTableAccess) { + return restrictDirectTableAccess(Output.of(restrictDirectTableAccess)); + } + /** * @param restrictQueryResult If true, restrict export of query result derived from restricted linked dataset table. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfig.java new file mode 100644 index 0000000000..914e2fe2e8 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfig.java @@ -0,0 +1,79 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.bigqueryanalyticshub.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.gcp.bigqueryanalyticshub.outputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfig; +import com.pulumi.gcp.bigqueryanalyticshub.outputs.DataExchangeSharingEnvironmentConfigDefaultExchangeConfig; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class DataExchangeSharingEnvironmentConfig { + /** + * @return Data Clean Room (DCR), used for privacy-safe and secured data sharing. + * + */ + private @Nullable DataExchangeSharingEnvironmentConfigDcrExchangeConfig dcrExchangeConfig; + /** + * @return Default Analytics Hub data exchange, used for secured data sharing. + * + */ + private @Nullable DataExchangeSharingEnvironmentConfigDefaultExchangeConfig defaultExchangeConfig; + + private DataExchangeSharingEnvironmentConfig() {} + /** + * @return Data Clean Room (DCR), used for privacy-safe and secured data sharing. + * + */ + public Optional dcrExchangeConfig() { + return Optional.ofNullable(this.dcrExchangeConfig); + } + /** + * @return Default Analytics Hub data exchange, used for secured data sharing. + * + */ + public Optional defaultExchangeConfig() { + return Optional.ofNullable(this.defaultExchangeConfig); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(DataExchangeSharingEnvironmentConfig defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable DataExchangeSharingEnvironmentConfigDcrExchangeConfig dcrExchangeConfig; + private @Nullable DataExchangeSharingEnvironmentConfigDefaultExchangeConfig defaultExchangeConfig; + public Builder() {} + public Builder(DataExchangeSharingEnvironmentConfig defaults) { + Objects.requireNonNull(defaults); + this.dcrExchangeConfig = defaults.dcrExchangeConfig; + this.defaultExchangeConfig = defaults.defaultExchangeConfig; + } + + @CustomType.Setter + public Builder dcrExchangeConfig(@Nullable DataExchangeSharingEnvironmentConfigDcrExchangeConfig dcrExchangeConfig) { + + this.dcrExchangeConfig = dcrExchangeConfig; + return this; + } + @CustomType.Setter + public Builder defaultExchangeConfig(@Nullable DataExchangeSharingEnvironmentConfigDefaultExchangeConfig defaultExchangeConfig) { + + this.defaultExchangeConfig = defaultExchangeConfig; + return this; + } + public DataExchangeSharingEnvironmentConfig build() { + final var _resultValue = new DataExchangeSharingEnvironmentConfig(); + _resultValue.dcrExchangeConfig = dcrExchangeConfig; + _resultValue.defaultExchangeConfig = defaultExchangeConfig; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfig.java new file mode 100644 index 0000000000..eb71b467a2 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfigDcrExchangeConfig.java @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.bigqueryanalyticshub.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.util.Objects; + +@CustomType +public final class DataExchangeSharingEnvironmentConfigDcrExchangeConfig { + private DataExchangeSharingEnvironmentConfigDcrExchangeConfig() {} + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(DataExchangeSharingEnvironmentConfigDcrExchangeConfig defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + public Builder() {} + public Builder(DataExchangeSharingEnvironmentConfigDcrExchangeConfig defaults) { + Objects.requireNonNull(defaults); + } + + public DataExchangeSharingEnvironmentConfigDcrExchangeConfig build() { + final var _resultValue = new DataExchangeSharingEnvironmentConfigDcrExchangeConfig(); + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig.java new file mode 100644 index 0000000000..82f5aed99d --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/DataExchangeSharingEnvironmentConfigDefaultExchangeConfig.java @@ -0,0 +1,32 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.bigqueryanalyticshub.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.util.Objects; + +@CustomType +public final class DataExchangeSharingEnvironmentConfigDefaultExchangeConfig { + private DataExchangeSharingEnvironmentConfigDefaultExchangeConfig() {} + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(DataExchangeSharingEnvironmentConfigDefaultExchangeConfig defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + public Builder() {} + public Builder(DataExchangeSharingEnvironmentConfigDefaultExchangeConfig defaults) { + Objects.requireNonNull(defaults); + } + + public DataExchangeSharingEnvironmentConfigDefaultExchangeConfig build() { + final var _resultValue = new DataExchangeSharingEnvironmentConfigDefaultExchangeConfig(); + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingBigqueryDataset.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingBigqueryDataset.java index fab3c7a340..28864969a0 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingBigqueryDataset.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingBigqueryDataset.java @@ -5,29 +5,42 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.bigqueryanalyticshub.outputs.ListingBigqueryDatasetSelectedResource; import java.lang.String; +import java.util.List; import java.util.Objects; +import javax.annotation.Nullable; @CustomType public final class ListingBigqueryDataset { /** * @return Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 * - * *** - * */ private String dataset; + /** + * @return Resource in this dataset that is selectively shared. 
This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + private @Nullable List selectedResources; private ListingBigqueryDataset() {} /** * @return Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 * - * *** - * */ public String dataset() { return this.dataset; } + /** + * @return Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + * Structure is documented below. + * + */ + public List selectedResources() { + return this.selectedResources == null ? List.of() : this.selectedResources; + } public static Builder builder() { return new Builder(); @@ -39,10 +52,12 @@ public static Builder builder(ListingBigqueryDataset defaults) { @CustomType.Builder public static final class Builder { private String dataset; + private @Nullable List selectedResources; public Builder() {} public Builder(ListingBigqueryDataset defaults) { Objects.requireNonNull(defaults); this.dataset = defaults.dataset; + this.selectedResources = defaults.selectedResources; } @CustomType.Setter @@ -53,9 +68,19 @@ public Builder dataset(String dataset) { this.dataset = dataset; return this; } + @CustomType.Setter + public Builder selectedResources(@Nullable List selectedResources) { + + this.selectedResources = selectedResources; + return this; + } + public Builder selectedResources(ListingBigqueryDatasetSelectedResource... selectedResources) { + return selectedResources(List.of(selectedResources)); + } public ListingBigqueryDataset build() { final var _resultValue = new ListingBigqueryDataset(); _resultValue.dataset = dataset; + _resultValue.selectedResources = selectedResources; return _resultValue; } } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingBigqueryDatasetSelectedResource.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingBigqueryDatasetSelectedResource.java new file mode 100644 index 0000000000..64fad1d957 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingBigqueryDatasetSelectedResource.java @@ -0,0 +1,61 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.bigqueryanalyticshub.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ListingBigqueryDatasetSelectedResource { + /** + * @return Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + * + * *** + * + */ + private @Nullable String table; + + private ListingBigqueryDatasetSelectedResource() {} + /** + * @return Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + * + * *** + * + */ + public Optional table() { + return Optional.ofNullable(this.table); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ListingBigqueryDatasetSelectedResource defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String table; + public Builder() {} + public Builder(ListingBigqueryDatasetSelectedResource defaults) { + Objects.requireNonNull(defaults); + this.table = defaults.table; + } + + @CustomType.Setter + public Builder table(@Nullable String table) { + + this.table = table; + return this; + } + public ListingBigqueryDatasetSelectedResource build() { + final var _resultValue = new ListingBigqueryDatasetSelectedResource(); + _resultValue.table = table; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingRestrictedExportConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingRestrictedExportConfig.java index ec5dbdcb4c..aca691d414 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingRestrictedExportConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigqueryanalyticshub/outputs/ListingRestrictedExportConfig.java @@ -16,6 +16,12 @@ public final class ListingRestrictedExportConfig { * */ private @Nullable Boolean enabled; + /** + * @return (Output) + * If true, restrict direct table access(read api/tabledata.list) on linked table. + * + */ + private @Nullable Boolean restrictDirectTableAccess; /** * @return If true, restrict export of query result derived from restricted linked dataset table. * @@ -30,6 +36,14 @@ private ListingRestrictedExportConfig() {} public Optional enabled() { return Optional.ofNullable(this.enabled); } + /** + * @return (Output) + * If true, restrict direct table access(read api/tabledata.list) on linked table. + * + */ + public Optional restrictDirectTableAccess() { + return Optional.ofNullable(this.restrictDirectTableAccess); + } /** * @return If true, restrict export of query result derived from restricted linked dataset table. 
* @@ -48,11 +62,13 @@ public static Builder builder(ListingRestrictedExportConfig defaults) { @CustomType.Builder public static final class Builder { private @Nullable Boolean enabled; + private @Nullable Boolean restrictDirectTableAccess; private @Nullable Boolean restrictQueryResult; public Builder() {} public Builder(ListingRestrictedExportConfig defaults) { Objects.requireNonNull(defaults); this.enabled = defaults.enabled; + this.restrictDirectTableAccess = defaults.restrictDirectTableAccess; this.restrictQueryResult = defaults.restrictQueryResult; } @@ -63,6 +79,12 @@ public Builder enabled(@Nullable Boolean enabled) { return this; } @CustomType.Setter + public Builder restrictDirectTableAccess(@Nullable Boolean restrictDirectTableAccess) { + + this.restrictDirectTableAccess = restrictDirectTableAccess; + return this; + } + @CustomType.Setter public Builder restrictQueryResult(@Nullable Boolean restrictQueryResult) { this.restrictQueryResult = restrictQueryResult; @@ -71,6 +93,7 @@ public Builder restrictQueryResult(@Nullable Boolean restrictQueryResult) { public ListingRestrictedExportConfig build() { final var _resultValue = new ListingRestrictedExportConfig(); _resultValue.enabled = enabled; + _resultValue.restrictDirectTableAccess = restrictDirectTableAccess; _resultValue.restrictQueryResult = restrictQueryResult; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigtable/Table.java b/sdk/java/src/main/java/com/pulumi/gcp/bigtable/Table.java index f94711099a..f275efc32a 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigtable/Table.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigtable/Table.java @@ -75,6 +75,24 @@ * .build(), * TableColumnFamilyArgs.builder() * .family("family-second") + * .type("intsum") + * .build(), + * TableColumnFamilyArgs.builder() + * .family("family-third") + * .type(""" + * { + * "aggregateType": { + * "max": {}, + * "inputType": { + * "int64Type": { + * "encoding": { + * "bigEndianBytes": {} + * } + * } + * } + * } + * } + * """) * .build()) * .changeStreamRetention("24h0m0s") * .automatedBackupPolicy(TableAutomatedBackupPolicyArgs.builder() diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigtable/inputs/TableColumnFamilyArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/bigtable/inputs/TableColumnFamilyArgs.java index 3df391b854..c498cb1299 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigtable/inputs/TableColumnFamilyArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigtable/inputs/TableColumnFamilyArgs.java @@ -8,6 +8,8 @@ import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.String; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; public final class TableColumnFamilyArgs extends com.pulumi.resources.ResourceArgs { @@ -29,10 +31,26 @@ public Output family() { return this.family; } + /** + * The type of the column family. + * + */ + @Import(name="type") + private @Nullable Output type; + + /** + * @return The type of the column family. + * + */ + public Optional> type() { + return Optional.ofNullable(this.type); + } + private TableColumnFamilyArgs() {} private TableColumnFamilyArgs(TableColumnFamilyArgs $) { this.family = $.family; + this.type = $.type; } public static Builder builder() { @@ -74,6 +92,27 @@ public Builder family(String family) { return family(Output.of(family)); } + /** + * @param type The type of the column family. 
+ * + * @return builder + * + */ + public Builder type(@Nullable Output type) { + $.type = type; + return this; + } + + /** + * @param type The type of the column family. + * + * @return builder + * + */ + public Builder type(String type) { + return type(Output.of(type)); + } + public TableColumnFamilyArgs build() { if ($.family == null) { throw new MissingRequiredPropertyException("TableColumnFamilyArgs", "family"); diff --git a/sdk/java/src/main/java/com/pulumi/gcp/bigtable/outputs/TableColumnFamily.java b/sdk/java/src/main/java/com/pulumi/gcp/bigtable/outputs/TableColumnFamily.java index ad1a506823..ae623449e0 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/bigtable/outputs/TableColumnFamily.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/bigtable/outputs/TableColumnFamily.java @@ -7,6 +7,8 @@ import com.pulumi.exceptions.MissingRequiredPropertyException; import java.lang.String; import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; @CustomType public final class TableColumnFamily { @@ -15,6 +17,11 @@ public final class TableColumnFamily { * */ private String family; + /** + * @return The type of the column family. + * + */ + private @Nullable String type; private TableColumnFamily() {} /** @@ -24,6 +31,13 @@ private TableColumnFamily() {} public String family() { return this.family; } + /** + * @return The type of the column family. + * + */ + public Optional type() { + return Optional.ofNullable(this.type); + } public static Builder builder() { return new Builder(); @@ -35,10 +49,12 @@ public static Builder builder(TableColumnFamily defaults) { @CustomType.Builder public static final class Builder { private String family; + private @Nullable String type; public Builder() {} public Builder(TableColumnFamily defaults) { Objects.requireNonNull(defaults); this.family = defaults.family; + this.type = defaults.type; } @CustomType.Setter @@ -49,9 +65,16 @@ public Builder family(String family) { this.family = family; return this; } + @CustomType.Setter + public Builder type(@Nullable String type) { + + this.type = type; + return this; + } public TableColumnFamily build() { final var _resultValue = new TableColumnFamily(); _resultValue.family = family; + _resultValue.type = type; return _resultValue; } } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/Authority.java b/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/Authority.java index 57b6099bf7..a340dc72f5 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/Authority.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/Authority.java @@ -552,14 +552,16 @@ public Output> deletionProtection() { return Codegen.optional(this.deletionProtection); } /** - * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. * */ @Export(name="desiredState", refs={String.class}, tree="[0]") private Output desiredState; /** - * @return Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * @return Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. 
* */ public Output> desiredState() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/AuthorityArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/AuthorityArgs.java index a3398ab8f8..d8a48ce362 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/AuthorityArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/AuthorityArgs.java @@ -61,14 +61,16 @@ public Optional> deletionProtection() { } /** - * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. * */ @Import(name="desiredState") private @Nullable Output desiredState; /** - * @return Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * @return Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. * */ public Optional> desiredState() { @@ -372,7 +374,8 @@ public Builder deletionProtection(Boolean deletionProtection) { } /** - * @param desiredState Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * @param desiredState Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. * * @return builder * @@ -383,7 +386,8 @@ public Builder desiredState(@Nullable Output desiredState) { } /** - * @param desiredState Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * @param desiredState Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/inputs/AuthorityState.java b/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/inputs/AuthorityState.java index c5d5520e6d..58e497989a 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/inputs/AuthorityState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificateauthority/inputs/AuthorityState.java @@ -98,14 +98,16 @@ public Optional> deletionProtection() { } /** - * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. * */ @Import(name="desiredState") private @Nullable Output desiredState; /** - * @return Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * @return Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. * */ public Optional> desiredState() { @@ -580,7 +582,8 @@ public Builder deletionProtection(Boolean deletionProtection) { } /** - * @param desiredState Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * @param desiredState Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. 
* * @return builder * @@ -591,7 +594,8 @@ public Builder desiredState(@Nullable Output desiredState) { } /** - * @param desiredState Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * @param desiredState Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/Certificate.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/Certificate.java index b0297bec29..7a2dec8126 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/Certificate.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/Certificate.java @@ -696,6 +696,20 @@ public Output project() { public Output> pulumiLabels() { return this.pulumiLabels; } + /** + * The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + * + */ + @Export(name="sanDnsnames", refs={List.class,String.class}, tree="[0,1]") + private Output> sanDnsnames; + + /** + * @return The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + * + */ + public Output> sanDnsnames() { + return this.sanDnsnames; + } /** * The scope of the certificate. * DEFAULT: Certificates with default scope are served from core Google data centers. diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/CertificatemanagerFunctions.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/CertificatemanagerFunctions.java index a838957dae..1c9f455974 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/CertificatemanagerFunctions.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/CertificatemanagerFunctions.java @@ -10,7 +10,10 @@ import com.pulumi.gcp.Utilities; import com.pulumi.gcp.certificatemanager.inputs.GetCertificateMapArgs; import com.pulumi.gcp.certificatemanager.inputs.GetCertificateMapPlainArgs; +import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs; +import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesPlainArgs; import com.pulumi.gcp.certificatemanager.outputs.GetCertificateMapResult; +import com.pulumi.gcp.certificatemanager.outputs.GetCertificatesResult; import java.util.concurrent.CompletableFuture; public final class CertificatemanagerFunctions { @@ -182,4 +185,454 @@ public static Output getCertificateMap(GetCertificateMa public static CompletableFuture getCertificateMapPlain(GetCertificateMapPlainArgs args, InvokeOptions options) { return Deployment.getInstance().invokeAsync("gcp:certificatemanager/getCertificateMap:getCertificateMap", TypeShape.of(GetCertificateMapResult.class), args, Utilities.withVersion(options)); } + /** + * List all certificates within Google Certificate Manager for a given project, region or filter. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
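+     *         // With no arguments, this lists all certificates for the provider's project;
+     *         // the region defaults to GLOBAL when it is not set.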
+     *         final var certificates = CertificatemanagerFunctions.getCertificates();
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ### With A Filter + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates(GetCertificatesArgs.builder()
+     *             .filter("name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*")
+     *             .build());
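+     * 
+     *         // Example consumption (sketch): export the names of the matched certificates.
+     *         // This assumes GetCertificatesResult exposes a certificates() accessor returning the
+     *         // GetCertificatesCertificate values shown by this data source.
+     *         ctx.export("certificateNames", certificates.applyValue(result -> result.certificates().stream()
+     *             .map(cert -> cert.name())
+     *             .collect(java.util.stream.Collectors.toList())));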
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getCertificates() { + return getCertificates(GetCertificatesArgs.Empty, InvokeOptions.Empty); + } + /** + * List all certificates within Google Certificate Manager for a given project, region or filter. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates();
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ### With A Filter + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates(GetCertificatesArgs.builder()
+     *             .filter("name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getCertificatesPlain() { + return getCertificatesPlain(GetCertificatesPlainArgs.Empty, InvokeOptions.Empty); + } + /** + * List all certificates within Google Certificate Manager for a given project, region or filter. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates();
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ### With A Filter + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates(GetCertificatesArgs.builder()
+     *             .filter("name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getCertificates(GetCertificatesArgs args) { + return getCertificates(args, InvokeOptions.Empty); + } + /** + * List all certificates within Google Certificate Manager for a given project, region or filter. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates();
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ### With A Filter + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates(GetCertificatesArgs.builder()
+     *             .filter("name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getCertificatesPlain(GetCertificatesPlainArgs args) { + return getCertificatesPlain(args, InvokeOptions.Empty); + } + /** + * List all certificates within Google Certificate Manager for a given project, region or filter. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates();
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ### With A Filter + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates(GetCertificatesArgs.builder()
+     *             .filter("name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getCertificates(GetCertificatesArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("gcp:certificatemanager/getCertificates:getCertificates", TypeShape.of(GetCertificatesResult.class), args, Utilities.withVersion(options)); + } + /** + * List all certificates within Google Certificate Manager for a given project, region or filter. + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates();
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + * ### With A Filter + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.certificatemanager.CertificatemanagerFunctions;
+     * import com.pulumi.gcp.certificatemanager.inputs.GetCertificatesArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var certificates = CertificatemanagerFunctions.getCertificates(GetCertificatesArgs.builder()
+     *             .filter("name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*")
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getCertificatesPlain(GetCertificatesPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("gcp:certificatemanager/getCertificates:getCertificates", TypeShape.of(GetCertificatesResult.class), args, Utilities.withVersion(options)); + } } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/CertificateState.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/CertificateState.java index 079a259fbb..5fdbd28841 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/CertificateState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/CertificateState.java @@ -8,6 +8,7 @@ import com.pulumi.gcp.certificatemanager.inputs.CertificateManagedArgs; import com.pulumi.gcp.certificatemanager.inputs.CertificateSelfManagedArgs; import java.lang.String; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -160,6 +161,21 @@ public Optional>> pulumiLabels() { return Optional.ofNullable(this.pulumiLabels); } + /** + * The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + * + */ + @Import(name="sanDnsnames") + private @Nullable Output> sanDnsnames; + + /** + * @return The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + * + */ + public Optional>> sanDnsnames() { + return Optional.ofNullable(this.sanDnsnames); + } + /** * The scope of the certificate. * DEFAULT: Certificates with default scope are served from core Google data centers. @@ -219,6 +235,7 @@ private CertificateState(CertificateState $) { this.name = $.name; this.project = $.project; this.pulumiLabels = $.pulumiLabels; + this.sanDnsnames = $.sanDnsnames; this.scope = $.scope; this.selfManaged = $.selfManaged; } @@ -431,6 +448,37 @@ public Builder pulumiLabels(Map pulumiLabels) { return pulumiLabels(Output.of(pulumiLabels)); } + /** + * @param sanDnsnames The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + * + * @return builder + * + */ + public Builder sanDnsnames(@Nullable Output> sanDnsnames) { + $.sanDnsnames = sanDnsnames; + return this; + } + + /** + * @param sanDnsnames The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + * + * @return builder + * + */ + public Builder sanDnsnames(List sanDnsnames) { + return sanDnsnames(Output.of(sanDnsnames)); + } + + /** + * @param sanDnsnames The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + * + * @return builder + * + */ + public Builder sanDnsnames(String... sanDnsnames) { + return sanDnsnames(List.of(sanDnsnames)); + } + /** * @param scope The scope of the certificate. * DEFAULT: Certificates with default scope are served from core Google data centers. diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/GetCertificatesArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/GetCertificatesArgs.java new file mode 100644 index 0000000000..b5a76a5cf1 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/GetCertificatesArgs.java @@ -0,0 +1,120 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.certificatemanager.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCertificatesArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetCertificatesArgs Empty = new GetCertificatesArgs(); + + /** + * Filter expression to restrict the certificates returned. + * + */ + @Import(name="filter") + private @Nullable Output filter; + + /** + * @return Filter expression to restrict the certificates returned. + * + */ + public Optional> filter() { + return Optional.ofNullable(this.filter); + } + + /** + * The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + * + */ + @Import(name="region") + private @Nullable Output region; + + /** + * @return The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + * + */ + public Optional> region() { + return Optional.ofNullable(this.region); + } + + private GetCertificatesArgs() {} + + private GetCertificatesArgs(GetCertificatesArgs $) { + this.filter = $.filter; + this.region = $.region; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCertificatesArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCertificatesArgs $; + + public Builder() { + $ = new GetCertificatesArgs(); + } + + public Builder(GetCertificatesArgs defaults) { + $ = new GetCertificatesArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param filter Filter expression to restrict the certificates returned. + * + * @return builder + * + */ + public Builder filter(@Nullable Output filter) { + $.filter = filter; + return this; + } + + /** + * @param filter Filter expression to restrict the certificates returned. + * + * @return builder + * + */ + public Builder filter(String filter) { + return filter(Output.of(filter)); + } + + /** + * @param region The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + * + * @return builder + * + */ + public Builder region(@Nullable Output region) { + $.region = region; + return this; + } + + /** + * @param region The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + * + * @return builder + * + */ + public Builder region(String region) { + return region(Output.of(region)); + } + + public GetCertificatesArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/GetCertificatesPlainArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/GetCertificatesPlainArgs.java new file mode 100644 index 0000000000..a176ca0f02 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/inputs/GetCertificatesPlainArgs.java @@ -0,0 +1,99 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.certificatemanager.inputs; + +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCertificatesPlainArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetCertificatesPlainArgs Empty = new GetCertificatesPlainArgs(); + + /** + * Filter expression to restrict the certificates returned. + * + */ + @Import(name="filter") + private @Nullable String filter; + + /** + * @return Filter expression to restrict the certificates returned. + * + */ + public Optional filter() { + return Optional.ofNullable(this.filter); + } + + /** + * The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + * + */ + @Import(name="region") + private @Nullable String region; + + /** + * @return The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + * + */ + public Optional region() { + return Optional.ofNullable(this.region); + } + + private GetCertificatesPlainArgs() {} + + private GetCertificatesPlainArgs(GetCertificatesPlainArgs $) { + this.filter = $.filter; + this.region = $.region; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCertificatesPlainArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCertificatesPlainArgs $; + + public Builder() { + $ = new GetCertificatesPlainArgs(); + } + + public Builder(GetCertificatesPlainArgs defaults) { + $ = new GetCertificatesPlainArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param filter Filter expression to restrict the certificates returned. + * + * @return builder + * + */ + public Builder filter(@Nullable String filter) { + $.filter = filter; + return this; + } + + /** + * @param region The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + * + * @return builder + * + */ + public Builder region(@Nullable String region) { + $.region = region; + return this; + } + + public GetCertificatesPlainArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificate.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificate.java new file mode 100644 index 0000000000..504c2bd22b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificate.java @@ -0,0 +1,302 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.certificatemanager.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.certificatemanager.outputs.GetCertificatesCertificateManaged; +import java.lang.String; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +@CustomType +public final class GetCertificatesCertificate { + /** + * @return A human-readable description of the resource. + * + */ + private String description; + private Map effectiveLabels; + /** + * @return Set of label tags associated with the Certificate resource. + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field 'effective_labels' for all of the labels present on the resource. 
+ * + */ + private Map labels; + /** + * @return The Certificate Manager location. If not specified, "global" is used. + * + */ + private String location; + /** + * @return Configuration and state of a Managed Certificate. + * Certificate Manager provisions and renews Managed Certificates + * automatically, for as long as it's authorized to do so. + * + */ + private List manageds; + /** + * @return A user-defined name of the certificate. Certificate names must be unique + * The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, + * and all following characters must be a dash, underscore, letter or digit. + * + */ + private String name; + /** + * @return The ID of the project in which the resource belongs. If it + * is not provided, the provider project is used. + * + */ + private String project; + /** + * @return The combination of labels configured directly on the resource + * and default labels configured on the provider. + * + */ + private Map pulumiLabels; + /** + * @return The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + * + */ + private List sanDnsnames; + /** + * @return The scope of the certificate. + * + * DEFAULT: Certificates with default scope are served from core Google data centers. + * If unsure, choose this option. + * + * EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. + * See https://cloud.google.com/vpc/docs/edge-locations. + * + * ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). + * See https://cloud.google.com/compute/docs/regions-zones + * + */ + private String scope; + + private GetCertificatesCertificate() {} + /** + * @return A human-readable description of the resource. + * + */ + public String description() { + return this.description; + } + public Map effectiveLabels() { + return this.effectiveLabels; + } + /** + * @return Set of label tags associated with the Certificate resource. + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field 'effective_labels' for all of the labels present on the resource. + * + */ + public Map labels() { + return this.labels; + } + /** + * @return The Certificate Manager location. If not specified, "global" is used. + * + */ + public String location() { + return this.location; + } + /** + * @return Configuration and state of a Managed Certificate. + * Certificate Manager provisions and renews Managed Certificates + * automatically, for as long as it's authorized to do so. + * + */ + public List manageds() { + return this.manageds; + } + /** + * @return A user-defined name of the certificate. Certificate names must be unique + * The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, + * and all following characters must be a dash, underscore, letter or digit. + * + */ + public String name() { + return this.name; + } + /** + * @return The ID of the project in which the resource belongs. If it + * is not provided, the provider project is used. + * + */ + public String project() { + return this.project; + } + /** + * @return The combination of labels configured directly on the resource + * and default labels configured on the provider. 
+ * + */ + public Map pulumiLabels() { + return this.pulumiLabels; + } + /** + * @return The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + * + */ + public List sanDnsnames() { + return this.sanDnsnames; + } + /** + * @return The scope of the certificate. + * + * DEFAULT: Certificates with default scope are served from core Google data centers. + * If unsure, choose this option. + * + * EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. + * See https://cloud.google.com/vpc/docs/edge-locations. + * + * ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). + * See https://cloud.google.com/compute/docs/regions-zones + * + */ + public String scope() { + return this.scope; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCertificatesCertificate defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String description; + private Map effectiveLabels; + private Map labels; + private String location; + private List manageds; + private String name; + private String project; + private Map pulumiLabels; + private List sanDnsnames; + private String scope; + public Builder() {} + public Builder(GetCertificatesCertificate defaults) { + Objects.requireNonNull(defaults); + this.description = defaults.description; + this.effectiveLabels = defaults.effectiveLabels; + this.labels = defaults.labels; + this.location = defaults.location; + this.manageds = defaults.manageds; + this.name = defaults.name; + this.project = defaults.project; + this.pulumiLabels = defaults.pulumiLabels; + this.sanDnsnames = defaults.sanDnsnames; + this.scope = defaults.scope; + } + + @CustomType.Setter + public Builder description(String description) { + if (description == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "description"); + } + this.description = description; + return this; + } + @CustomType.Setter + public Builder effectiveLabels(Map effectiveLabels) { + if (effectiveLabels == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "effectiveLabels"); + } + this.effectiveLabels = effectiveLabels; + return this; + } + @CustomType.Setter + public Builder labels(Map labels) { + if (labels == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "labels"); + } + this.labels = labels; + return this; + } + @CustomType.Setter + public Builder location(String location) { + if (location == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "location"); + } + this.location = location; + return this; + } + @CustomType.Setter + public Builder manageds(List manageds) { + if (manageds == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "manageds"); + } + this.manageds = manageds; + return this; + } + public Builder manageds(GetCertificatesCertificateManaged... 
manageds) { + return manageds(List.of(manageds)); + } + @CustomType.Setter + public Builder name(String name) { + if (name == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "name"); + } + this.name = name; + return this; + } + @CustomType.Setter + public Builder project(String project) { + if (project == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "project"); + } + this.project = project; + return this; + } + @CustomType.Setter + public Builder pulumiLabels(Map pulumiLabels) { + if (pulumiLabels == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "pulumiLabels"); + } + this.pulumiLabels = pulumiLabels; + return this; + } + @CustomType.Setter + public Builder sanDnsnames(List sanDnsnames) { + if (sanDnsnames == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "sanDnsnames"); + } + this.sanDnsnames = sanDnsnames; + return this; + } + public Builder sanDnsnames(String... sanDnsnames) { + return sanDnsnames(List.of(sanDnsnames)); + } + @CustomType.Setter + public Builder scope(String scope) { + if (scope == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificate", "scope"); + } + this.scope = scope; + return this; + } + public GetCertificatesCertificate build() { + final var _resultValue = new GetCertificatesCertificate(); + _resultValue.description = description; + _resultValue.effectiveLabels = effectiveLabels; + _resultValue.labels = labels; + _resultValue.location = location; + _resultValue.manageds = manageds; + _resultValue.name = name; + _resultValue.project = project; + _resultValue.pulumiLabels = pulumiLabels; + _resultValue.sanDnsnames = sanDnsnames; + _resultValue.scope = scope; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManaged.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManaged.java new file mode 100644 index 0000000000..f5c21fcac8 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManaged.java @@ -0,0 +1,196 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.certificatemanager.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.certificatemanager.outputs.GetCertificatesCertificateManagedAuthorizationAttemptInfo; +import com.pulumi.gcp.certificatemanager.outputs.GetCertificatesCertificateManagedProvisioningIssue; +import java.lang.String; +import java.util.List; +import java.util.Objects; + +@CustomType +public final class GetCertificatesCertificateManaged { + /** + * @return Detailed state of the latest authorization attempt for each domain + * specified for this Managed Certificate. + * + */ + private List authorizationAttemptInfos; + /** + * @return Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + * + */ + private List dnsAuthorizations; + /** + * @return The domains for which a managed SSL certificate will be generated. 
+ * Wildcard domains are only supported with DNS challenge resolution + * + */ + private List domains; + /** + * @return The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. + * If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. + * Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + * + */ + private String issuanceConfig; + /** + * @return Information about issues with provisioning this Managed Certificate. + * + */ + private List provisioningIssues; + /** + * @return A state of this Managed Certificate. + * + */ + private String state; + + private GetCertificatesCertificateManaged() {} + /** + * @return Detailed state of the latest authorization attempt for each domain + * specified for this Managed Certificate. + * + */ + public List authorizationAttemptInfos() { + return this.authorizationAttemptInfos; + } + /** + * @return Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + * + */ + public List dnsAuthorizations() { + return this.dnsAuthorizations; + } + /** + * @return The domains for which a managed SSL certificate will be generated. + * Wildcard domains are only supported with DNS challenge resolution + * + */ + public List domains() { + return this.domains; + } + /** + * @return The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. + * If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. + * Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + * + */ + public String issuanceConfig() { + return this.issuanceConfig; + } + /** + * @return Information about issues with provisioning this Managed Certificate. + * + */ + public List provisioningIssues() { + return this.provisioningIssues; + } + /** + * @return A state of this Managed Certificate. 
+ * + */ + public String state() { + return this.state; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCertificatesCertificateManaged defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private List authorizationAttemptInfos; + private List dnsAuthorizations; + private List domains; + private String issuanceConfig; + private List provisioningIssues; + private String state; + public Builder() {} + public Builder(GetCertificatesCertificateManaged defaults) { + Objects.requireNonNull(defaults); + this.authorizationAttemptInfos = defaults.authorizationAttemptInfos; + this.dnsAuthorizations = defaults.dnsAuthorizations; + this.domains = defaults.domains; + this.issuanceConfig = defaults.issuanceConfig; + this.provisioningIssues = defaults.provisioningIssues; + this.state = defaults.state; + } + + @CustomType.Setter + public Builder authorizationAttemptInfos(List authorizationAttemptInfos) { + if (authorizationAttemptInfos == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManaged", "authorizationAttemptInfos"); + } + this.authorizationAttemptInfos = authorizationAttemptInfos; + return this; + } + public Builder authorizationAttemptInfos(GetCertificatesCertificateManagedAuthorizationAttemptInfo... authorizationAttemptInfos) { + return authorizationAttemptInfos(List.of(authorizationAttemptInfos)); + } + @CustomType.Setter + public Builder dnsAuthorizations(List dnsAuthorizations) { + if (dnsAuthorizations == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManaged", "dnsAuthorizations"); + } + this.dnsAuthorizations = dnsAuthorizations; + return this; + } + public Builder dnsAuthorizations(String... dnsAuthorizations) { + return dnsAuthorizations(List.of(dnsAuthorizations)); + } + @CustomType.Setter + public Builder domains(List domains) { + if (domains == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManaged", "domains"); + } + this.domains = domains; + return this; + } + public Builder domains(String... domains) { + return domains(List.of(domains)); + } + @CustomType.Setter + public Builder issuanceConfig(String issuanceConfig) { + if (issuanceConfig == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManaged", "issuanceConfig"); + } + this.issuanceConfig = issuanceConfig; + return this; + } + @CustomType.Setter + public Builder provisioningIssues(List provisioningIssues) { + if (provisioningIssues == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManaged", "provisioningIssues"); + } + this.provisioningIssues = provisioningIssues; + return this; + } + public Builder provisioningIssues(GetCertificatesCertificateManagedProvisioningIssue... 
provisioningIssues) { + return provisioningIssues(List.of(provisioningIssues)); + } + @CustomType.Setter + public Builder state(String state) { + if (state == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManaged", "state"); + } + this.state = state; + return this; + } + public GetCertificatesCertificateManaged build() { + final var _resultValue = new GetCertificatesCertificateManaged(); + _resultValue.authorizationAttemptInfos = authorizationAttemptInfos; + _resultValue.dnsAuthorizations = dnsAuthorizations; + _resultValue.domains = domains; + _resultValue.issuanceConfig = issuanceConfig; + _resultValue.provisioningIssues = provisioningIssues; + _resultValue.state = state; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManagedAuthorizationAttemptInfo.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManagedAuthorizationAttemptInfo.java new file mode 100644 index 0000000000..ab269b4fa2 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManagedAuthorizationAttemptInfo.java @@ -0,0 +1,131 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.certificatemanager.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetCertificatesCertificateManagedAuthorizationAttemptInfo { + /** + * @return Human readable explanation for reaching the state. Provided to help + * address the configuration issues. + * Not guaranteed to be stable. For programmatic access use 'failure_reason' field. + * + */ + private String details; + /** + * @return Domain name of the authorization attempt. + * + */ + private String domain; + /** + * @return Reason for failure of the authorization attempt for the domain. + * + */ + private String failureReason; + /** + * @return State of the domain for managed certificate issuance. + * + */ + private String state; + + private GetCertificatesCertificateManagedAuthorizationAttemptInfo() {} + /** + * @return Human readable explanation for reaching the state. Provided to help + * address the configuration issues. + * Not guaranteed to be stable. For programmatic access use 'failure_reason' field. + * + */ + public String details() { + return this.details; + } + /** + * @return Domain name of the authorization attempt. + * + */ + public String domain() { + return this.domain; + } + /** + * @return Reason for failure of the authorization attempt for the domain. + * + */ + public String failureReason() { + return this.failureReason; + } + /** + * @return State of the domain for managed certificate issuance. 
+ * + */ + public String state() { + return this.state; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCertificatesCertificateManagedAuthorizationAttemptInfo defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String details; + private String domain; + private String failureReason; + private String state; + public Builder() {} + public Builder(GetCertificatesCertificateManagedAuthorizationAttemptInfo defaults) { + Objects.requireNonNull(defaults); + this.details = defaults.details; + this.domain = defaults.domain; + this.failureReason = defaults.failureReason; + this.state = defaults.state; + } + + @CustomType.Setter + public Builder details(String details) { + if (details == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManagedAuthorizationAttemptInfo", "details"); + } + this.details = details; + return this; + } + @CustomType.Setter + public Builder domain(String domain) { + if (domain == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManagedAuthorizationAttemptInfo", "domain"); + } + this.domain = domain; + return this; + } + @CustomType.Setter + public Builder failureReason(String failureReason) { + if (failureReason == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManagedAuthorizationAttemptInfo", "failureReason"); + } + this.failureReason = failureReason; + return this; + } + @CustomType.Setter + public Builder state(String state) { + if (state == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManagedAuthorizationAttemptInfo", "state"); + } + this.state = state; + return this; + } + public GetCertificatesCertificateManagedAuthorizationAttemptInfo build() { + final var _resultValue = new GetCertificatesCertificateManagedAuthorizationAttemptInfo(); + _resultValue.details = details; + _resultValue.domain = domain; + _resultValue.failureReason = failureReason; + _resultValue.state = state; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManagedProvisioningIssue.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManagedProvisioningIssue.java new file mode 100644 index 0000000000..5f9c9d36bd --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesCertificateManagedProvisioningIssue.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.certificatemanager.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetCertificatesCertificateManagedProvisioningIssue { + /** + * @return Human readable explanation about the issue. Provided to help address + * the configuration issues. + * Not guaranteed to be stable. For programmatic access use 'reason' field. + * + */ + private String details; + /** + * @return Reason for provisioning failures. + * + */ + private String reason; + + private GetCertificatesCertificateManagedProvisioningIssue() {} + /** + * @return Human readable explanation about the issue. Provided to help address + * the configuration issues. 
+ * Not guaranteed to be stable. For programmatic access use 'reason' field. + * + */ + public String details() { + return this.details; + } + /** + * @return Reason for provisioning failures. + * + */ + public String reason() { + return this.reason; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCertificatesCertificateManagedProvisioningIssue defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String details; + private String reason; + public Builder() {} + public Builder(GetCertificatesCertificateManagedProvisioningIssue defaults) { + Objects.requireNonNull(defaults); + this.details = defaults.details; + this.reason = defaults.reason; + } + + @CustomType.Setter + public Builder details(String details) { + if (details == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManagedProvisioningIssue", "details"); + } + this.details = details; + return this; + } + @CustomType.Setter + public Builder reason(String reason) { + if (reason == null) { + throw new MissingRequiredPropertyException("GetCertificatesCertificateManagedProvisioningIssue", "reason"); + } + this.reason = reason; + return this; + } + public GetCertificatesCertificateManagedProvisioningIssue build() { + final var _resultValue = new GetCertificatesCertificateManagedProvisioningIssue(); + _resultValue.details = details; + _resultValue.reason = reason; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesResult.java b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesResult.java new file mode 100644 index 0000000000..269542ba1a --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/certificatemanager/outputs/GetCertificatesResult.java @@ -0,0 +1,106 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.certificatemanager.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.certificatemanager.outputs.GetCertificatesCertificate; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class GetCertificatesResult { + private List certificates; + private @Nullable String filter; + /** + * @return The provider-assigned unique ID for this managed resource. + * + */ + private String id; + private @Nullable String region; + + private GetCertificatesResult() {} + public List certificates() { + return this.certificates; + } + public Optional filter() { + return Optional.ofNullable(this.filter); + } + /** + * @return The provider-assigned unique ID for this managed resource. 
+ * + */ + public String id() { + return this.id; + } + public Optional region() { + return Optional.ofNullable(this.region); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCertificatesResult defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private List certificates; + private @Nullable String filter; + private String id; + private @Nullable String region; + public Builder() {} + public Builder(GetCertificatesResult defaults) { + Objects.requireNonNull(defaults); + this.certificates = defaults.certificates; + this.filter = defaults.filter; + this.id = defaults.id; + this.region = defaults.region; + } + + @CustomType.Setter + public Builder certificates(List certificates) { + if (certificates == null) { + throw new MissingRequiredPropertyException("GetCertificatesResult", "certificates"); + } + this.certificates = certificates; + return this; + } + public Builder certificates(GetCertificatesCertificate... certificates) { + return certificates(List.of(certificates)); + } + @CustomType.Setter + public Builder filter(@Nullable String filter) { + + this.filter = filter; + return this; + } + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("GetCertificatesResult", "id"); + } + this.id = id; + return this; + } + @CustomType.Setter + public Builder region(@Nullable String region) { + + this.region = region; + return this; + } + public GetCertificatesResult build() { + final var _resultValue = new GetCertificatesResult(); + _resultValue.certificates = certificates; + _resultValue.filter = filter; + _resultValue.id = id; + _resultValue.region = region; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudbuild/inputs/WorkerPoolWorkerConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudbuild/inputs/WorkerPoolWorkerConfigArgs.java index 112b302ebe..b287762830 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudbuild/inputs/WorkerPoolWorkerConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudbuild/inputs/WorkerPoolWorkerConfigArgs.java @@ -18,14 +18,14 @@ public final class WorkerPoolWorkerConfigArgs extends com.pulumi.resources.Resou public static final WorkerPoolWorkerConfigArgs Empty = new WorkerPoolWorkerConfigArgs(); /** - * Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + * Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. * */ @Import(name="diskSizeGb") private @Nullable Output diskSizeGb; /** - * @return Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + * @return Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. 
* */ public Optional> diskSizeGb() { @@ -33,14 +33,14 @@ public Optional> diskSizeGb() { } /** - * Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + * Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. * */ @Import(name="machineType") private @Nullable Output machineType; /** - * @return Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + * @return Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. * */ public Optional> machineType() { @@ -89,7 +89,7 @@ public Builder(WorkerPoolWorkerConfigArgs defaults) { } /** - * @param diskSizeGb Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + * @param diskSizeGb Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. * * @return builder * @@ -100,7 +100,7 @@ public Builder diskSizeGb(@Nullable Output diskSizeGb) { } /** - * @param diskSizeGb Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + * @param diskSizeGb Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. * * @return builder * @@ -110,7 +110,7 @@ public Builder diskSizeGb(Integer diskSizeGb) { } /** - * @param machineType Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + * @param machineType Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. * * @return builder * @@ -121,7 +121,7 @@ public Builder machineType(@Nullable Output machineType) { } /** - * @param machineType Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + * @param machineType Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. 
* * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudbuild/outputs/WorkerPoolWorkerConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudbuild/outputs/WorkerPoolWorkerConfig.java index d20a81def0..a1c39d9ccb 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudbuild/outputs/WorkerPoolWorkerConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudbuild/outputs/WorkerPoolWorkerConfig.java @@ -14,12 +14,12 @@ @CustomType public final class WorkerPoolWorkerConfig { /** - * @return Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + * @return Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. * */ private @Nullable Integer diskSizeGb; /** - * @return Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + * @return Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. * */ private @Nullable String machineType; @@ -31,14 +31,14 @@ public final class WorkerPoolWorkerConfig { private WorkerPoolWorkerConfig() {} /** - * @return Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + * @return Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. * */ public Optional diskSizeGb() { return Optional.ofNullable(this.diskSizeGb); } /** - * @return Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + * @return Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. * */ public Optional machineType() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/inputs/ServiceTemplateSpecVolumeArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/inputs/ServiceTemplateSpecVolumeArgs.java index cc02fca683..fdec180f66 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/inputs/ServiceTemplateSpecVolumeArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/inputs/ServiceTemplateSpecVolumeArgs.java @@ -71,8 +71,7 @@ public Output name() { /** * A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * Structure is documented below. 
* */ @@ -81,8 +80,7 @@ public Output name() { /** * @return A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * Structure is documented below. * */ @@ -208,8 +206,7 @@ public Builder name(String name) { /** * @param nfs A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * Structure is documented below. * * @return builder @@ -222,8 +219,7 @@ public Builder nfs(@Nullable Output nfs) { /** * @param nfs A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * Structure is documented below. * * @return builder diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/inputs/ServiceTemplateSpecVolumeCsiArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/inputs/ServiceTemplateSpecVolumeCsiArgs.java index 9d3cb7718f..fd085a60fc 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/inputs/ServiceTemplateSpecVolumeCsiArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/inputs/ServiceTemplateSpecVolumeCsiArgs.java @@ -21,8 +21,7 @@ public final class ServiceTemplateSpecVolumeCsiArgs extends com.pulumi.resources /** * Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * */ @Import(name="driver", required=true) @@ -31,8 +30,7 @@ public final class ServiceTemplateSpecVolumeCsiArgs extends com.pulumi.resources /** * @return Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * */ public Output driver() { @@ -102,8 +100,7 @@ public Builder(ServiceTemplateSpecVolumeCsiArgs defaults) { /** * @param driver Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". 
+ * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * * @return builder * @@ -116,8 +113,7 @@ public Builder driver(Output driver) { /** * @param driver Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/GetServiceTemplateSpecVolume.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/GetServiceTemplateSpecVolume.java index 0acb4ef7c1..04430e9a00 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/GetServiceTemplateSpecVolume.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/GetServiceTemplateSpecVolume.java @@ -32,8 +32,7 @@ public final class GetServiceTemplateSpecVolume { private String name; /** * @return A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * */ private List nfs; @@ -69,8 +68,7 @@ public String name() { } /** * @return A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * */ public List nfs() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/GetServiceTemplateSpecVolumeCsi.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/GetServiceTemplateSpecVolumeCsi.java index 8c0920e03f..01568a5068 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/GetServiceTemplateSpecVolumeCsi.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/GetServiceTemplateSpecVolumeCsi.java @@ -15,8 +15,7 @@ public final class GetServiceTemplateSpecVolumeCsi { /** * @return Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * */ private String driver; @@ -37,8 +36,7 @@ private GetServiceTemplateSpecVolumeCsi() {} /** * @return Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". 
+ * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * */ public String driver() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/ServiceTemplateSpecVolume.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/ServiceTemplateSpecVolume.java index 2bccd8b036..25f6645113 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/ServiceTemplateSpecVolume.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/ServiceTemplateSpecVolume.java @@ -35,8 +35,7 @@ public final class ServiceTemplateSpecVolume { private String name; /** * @return A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * Structure is documented below. * */ @@ -76,8 +75,7 @@ public String name() { } /** * @return A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * Structure is documented below. * */ diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/ServiceTemplateSpecVolumeCsi.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/ServiceTemplateSpecVolumeCsi.java index c279b28888..45d709c492 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/ServiceTemplateSpecVolumeCsi.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrun/outputs/ServiceTemplateSpecVolumeCsi.java @@ -17,8 +17,7 @@ public final class ServiceTemplateSpecVolumeCsi { /** * @return Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * */ private String driver; @@ -39,8 +38,7 @@ private ServiceTemplateSpecVolumeCsi() {} /** * @return Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". 
+ * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * */ public String driver() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/Service.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/Service.java index 6a345f4379..0b9e138e63 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/Service.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/Service.java @@ -642,7 +642,6 @@ * .name("cloudrun-service") * .location("us-central1") * .deletionProtection(false) - * .launchStage("BETA") * .template(ServiceTemplateArgs.builder() * .executionEnvironment("EXECUTION_ENVIRONMENT_GEN2") * .containers(ServiceTemplateContainerArgs.builder() @@ -717,7 +716,6 @@ * .location("us-central1") * .deletionProtection(false) * .ingress("INGRESS_TRAFFIC_ALL") - * .launchStage("BETA") * .template(ServiceTemplateArgs.builder() * .executionEnvironment("EXECUTION_ENVIRONMENT_GEN2") * .containers(ServiceTemplateContainerArgs.builder() @@ -749,6 +747,70 @@ * } * * <!--End PulumiCodeChooser --> + * ### Cloudrunv2 Service Mesh + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
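+ * // Generated example: a Cloud Run v2 service that joins a Cloud Service Mesh.
+ * // A networkservices Mesh is created first, a short Sleep gives it time to
+ * // become ready, and the service then references it through serviceMesh.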
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.networkservices.Mesh;
+ * import com.pulumi.gcp.networkservices.MeshArgs;
+ * import com.pulumi.time.Sleep;
+ * import com.pulumi.time.SleepArgs;
+ * import com.pulumi.gcp.cloudrunv2.Service;
+ * import com.pulumi.gcp.cloudrunv2.ServiceArgs;
+ * import com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateArgs;
+ * import com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateContainerArgs;
+ * import com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateServiceMeshArgs;
+ * import com.pulumi.resources.CustomResourceOptions;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var mesh = new Mesh("mesh", MeshArgs.builder()
+ *             .name("network-services-mesh")
+ *             .build());
+ * 
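+ *         // Pause for one minute after the mesh is created (via the `time`
+ *         // provider's Sleep resource), presumably so the Mesh is fully
+ *         // usable before the Cloud Run service attaches to it.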
+ *         var waitForMesh = new Sleep("waitForMesh", SleepArgs.builder()
+ *             .createDuration("1m")
+ *             .build(), CustomResourceOptions.builder()
+ *                 .dependsOn(mesh)
+ *                 .build());
+ * 
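+ *         // launchStage("BETA") is kept on this service because the serviceMesh
+ *         // integration is still pre-GA here; dependsOn(waitForMesh) sequences
+ *         // creation after the pause above.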
+ *         var default_ = new Service("default", ServiceArgs.builder()
+ *             .name("cloudrun-service")
+ *             .deletionProtection(false)
+ *             .location("us-central1")
+ *             .launchStage("BETA")
+ *             .template(ServiceTemplateArgs.builder()
+ *                 .containers(ServiceTemplateContainerArgs.builder()
+ *                     .image("us-docker.pkg.dev/cloudrun/container/hello")
+ *                     .build())
+ *                 .serviceMesh(ServiceTemplateServiceMeshArgs.builder()
+ *                     .mesh(mesh.id())
+ *                     .build())
+ *                 .build())
+ *             .build(), CustomResourceOptions.builder()
+ *                 .dependsOn(waitForMesh)
+ *                 .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> * * ## Import * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/JobTemplateTemplateVolumeArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/JobTemplateTemplateVolumeArgs.java index f978f9b5cf..384d14d032 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/JobTemplateTemplateVolumeArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/JobTemplateTemplateVolumeArgs.java @@ -56,7 +56,7 @@ public Optional> emptyDir() { } /** - * Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * Cloud Storage bucket mounted as a volume using GCSFuse. * Structure is documented below. * */ @@ -64,7 +64,7 @@ public Optional> emptyDir() { private @Nullable Output gcs; /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. * Structure is documented below. * */ @@ -88,7 +88,7 @@ public Output name() { } /** - * NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * NFS share mounted as a volume. * Structure is documented below. * */ @@ -96,7 +96,7 @@ public Output name() { private @Nullable Output nfs; /** - * @return NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * @return NFS share mounted as a volume. * Structure is documented below. * */ @@ -197,7 +197,7 @@ public Builder emptyDir(JobTemplateTemplateVolumeEmptyDirArgs emptyDir) { } /** - * @param gcs Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * @param gcs Cloud Storage bucket mounted as a volume using GCSFuse. * Structure is documented below. * * @return builder @@ -209,7 +209,7 @@ public Builder gcs(@Nullable Output gcs) { } /** - * @param gcs Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * @param gcs Cloud Storage bucket mounted as a volume using GCSFuse. * Structure is documented below. * * @return builder @@ -241,7 +241,7 @@ public Builder name(String name) { } /** - * @param nfs NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * @param nfs NFS share mounted as a volume. * Structure is documented below. * * @return builder @@ -253,7 +253,7 @@ public Builder nfs(@Nullable Output nfs) { } /** - * @param nfs NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * @param nfs NFS share mounted as a volume. * Structure is documented below. 
* * @return builder diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateArgs.java index 1af5531255..0e017fe756 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateArgs.java @@ -7,6 +7,7 @@ import com.pulumi.core.annotations.Import; import com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateContainerArgs; import com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateScalingArgs; +import com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateServiceMeshArgs; import com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateVolumeArgs; import com.pulumi.gcp.cloudrunv2.inputs.ServiceTemplateVpcAccessArgs; import java.lang.Boolean; @@ -178,6 +179,23 @@ public Optional> serviceAccount() { return Optional.ofNullable(this.serviceAccount); } + /** + * Enables Cloud Service Mesh for this Revision. + * Structure is documented below. + * + */ + @Import(name="serviceMesh") + private @Nullable Output serviceMesh; + + /** + * @return Enables Cloud Service Mesh for this Revision. + * Structure is documented below. + * + */ + public Optional> serviceMesh() { + return Optional.ofNullable(this.serviceMesh); + } + /** * Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity * @@ -256,6 +274,7 @@ private ServiceTemplateArgs(ServiceTemplateArgs $) { this.revision = $.revision; this.scaling = $.scaling; this.serviceAccount = $.serviceAccount; + this.serviceMesh = $.serviceMesh; this.sessionAffinity = $.sessionAffinity; this.timeout = $.timeout; this.volumes = $.volumes; @@ -500,6 +519,29 @@ public Builder serviceAccount(String serviceAccount) { return serviceAccount(Output.of(serviceAccount)); } + /** + * @param serviceMesh Enables Cloud Service Mesh for this Revision. + * Structure is documented below. + * + * @return builder + * + */ + public Builder serviceMesh(@Nullable Output serviceMesh) { + $.serviceMesh = serviceMesh; + return this; + } + + /** + * @param serviceMesh Enables Cloud Service Mesh for this Revision. + * Structure is documented below. + * + * @return builder + * + */ + public Builder serviceMesh(ServiceTemplateServiceMeshArgs serviceMesh) { + return serviceMesh(Output.of(serviceMesh)); + } + /** * @param sessionAffinity Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateServiceMeshArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateServiceMeshArgs.java new file mode 100644 index 0000000000..477fcc668b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateServiceMeshArgs.java @@ -0,0 +1,91 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudrunv2.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ServiceTemplateServiceMeshArgs extends com.pulumi.resources.ResourceArgs { + + public static final ServiceTemplateServiceMeshArgs Empty = new ServiceTemplateServiceMeshArgs(); + + /** + * The Mesh resource name. 
For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + * + * *** + * + */ + @Import(name="mesh") + private @Nullable Output mesh; + + /** + * @return The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + * + * *** + * + */ + public Optional> mesh() { + return Optional.ofNullable(this.mesh); + } + + private ServiceTemplateServiceMeshArgs() {} + + private ServiceTemplateServiceMeshArgs(ServiceTemplateServiceMeshArgs $) { + this.mesh = $.mesh; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ServiceTemplateServiceMeshArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ServiceTemplateServiceMeshArgs $; + + public Builder() { + $ = new ServiceTemplateServiceMeshArgs(); + } + + public Builder(ServiceTemplateServiceMeshArgs defaults) { + $ = new ServiceTemplateServiceMeshArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param mesh The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + * + * *** + * + * @return builder + * + */ + public Builder mesh(@Nullable Output mesh) { + $.mesh = mesh; + return this; + } + + /** + * @param mesh The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + * + * *** + * + * @return builder + * + */ + public Builder mesh(String mesh) { + return mesh(Output.of(mesh)); + } + + public ServiceTemplateServiceMeshArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateVolumeArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateVolumeArgs.java index 25cf4bb314..d52cfa608f 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateVolumeArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateVolumeArgs.java @@ -56,7 +56,7 @@ public Optional> emptyDir() { } /** - * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. * */ @@ -64,7 +64,7 @@ public Optional> emptyDir() { private @Nullable Output gcs; /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. * */ @@ -197,7 +197,7 @@ public Builder emptyDir(ServiceTemplateVolumeEmptyDirArgs emptyDir) { } /** - * @param gcs Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * @param gcs Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. 
* * @return builder @@ -209,7 +209,7 @@ public Builder gcs(@Nullable Output gcs) { } /** - * @param gcs Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * @param gcs Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. * * @return builder diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateVolumeNfsArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateVolumeNfsArgs.java index ad251a73c7..c2ad5d1625 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateVolumeNfsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/inputs/ServiceTemplateVolumeNfsArgs.java @@ -35,8 +35,6 @@ public Output path() { /** * If true, mount the NFS volume as read only * - * *** - * */ @Import(name="readOnly") private @Nullable Output readOnly; @@ -44,8 +42,6 @@ public Output path() { /** * @return If true, mount the NFS volume as read only * - * *** - * */ public Optional> readOnly() { return Optional.ofNullable(this.readOnly); @@ -116,8 +112,6 @@ public Builder path(String path) { /** * @param readOnly If true, mount the NFS volume as read only * - * *** - * * @return builder * */ @@ -129,8 +123,6 @@ public Builder readOnly(@Nullable Output readOnly) { /** * @param readOnly If true, mount the NFS volume as read only * - * *** - * * @return builder * */ diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetJobTemplateTemplateVolume.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetJobTemplateTemplateVolume.java index 02bea854c1..54418bc582 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetJobTemplateTemplateVolume.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetJobTemplateTemplateVolume.java @@ -27,7 +27,7 @@ public final class GetJobTemplateTemplateVolume { */ private List emptyDirs; /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. * */ private List gcs; @@ -37,7 +37,7 @@ public final class GetJobTemplateTemplateVolume { */ private String name; /** - * @return NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * @return NFS share mounted as a volume. * */ private List nfs; @@ -63,7 +63,7 @@ public List emptyDirs() { return this.emptyDirs; } /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. * */ public List gcs() { @@ -77,7 +77,7 @@ public String name() { return this.name; } /** - * @return NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * @return NFS share mounted as a volume. 
* */ public List nfs() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplate.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplate.java index 7228ba2a0f..834edff4e7 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplate.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplate.java @@ -7,6 +7,7 @@ import com.pulumi.exceptions.MissingRequiredPropertyException; import com.pulumi.gcp.cloudrunv2.outputs.GetServiceTemplateContainer; import com.pulumi.gcp.cloudrunv2.outputs.GetServiceTemplateScaling; +import com.pulumi.gcp.cloudrunv2.outputs.GetServiceTemplateServiceMesh; import com.pulumi.gcp.cloudrunv2.outputs.GetServiceTemplateVolume; import com.pulumi.gcp.cloudrunv2.outputs.GetServiceTemplateVpcAccess; import java.lang.Boolean; @@ -73,6 +74,11 @@ public final class GetServiceTemplate { * */ private String serviceAccount; + /** + * @return Enables Cloud Service Mesh for this Revision. + * + */ + private List serviceMeshes; /** * @return Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity * @@ -170,6 +176,13 @@ public List scalings() { public String serviceAccount() { return this.serviceAccount; } + /** + * @return Enables Cloud Service Mesh for this Revision. + * + */ + public List serviceMeshes() { + return this.serviceMeshes; + } /** * @return Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity * @@ -219,6 +232,7 @@ public static final class Builder { private String revision; private List scalings; private String serviceAccount; + private List serviceMeshes; private Boolean sessionAffinity; private String timeout; private List volumes; @@ -235,6 +249,7 @@ public Builder(GetServiceTemplate defaults) { this.revision = defaults.revision; this.scalings = defaults.scalings; this.serviceAccount = defaults.serviceAccount; + this.serviceMeshes = defaults.serviceMeshes; this.sessionAffinity = defaults.sessionAffinity; this.timeout = defaults.timeout; this.volumes = defaults.volumes; @@ -320,6 +335,17 @@ public Builder serviceAccount(String serviceAccount) { return this; } @CustomType.Setter + public Builder serviceMeshes(List serviceMeshes) { + if (serviceMeshes == null) { + throw new MissingRequiredPropertyException("GetServiceTemplate", "serviceMeshes"); + } + this.serviceMeshes = serviceMeshes; + return this; + } + public Builder serviceMeshes(GetServiceTemplateServiceMesh... 
serviceMeshes) { + return serviceMeshes(List.of(serviceMeshes)); + } + @CustomType.Setter public Builder sessionAffinity(Boolean sessionAffinity) { if (sessionAffinity == null) { throw new MissingRequiredPropertyException("GetServiceTemplate", "sessionAffinity"); @@ -368,6 +394,7 @@ public GetServiceTemplate build() { _resultValue.revision = revision; _resultValue.scalings = scalings; _resultValue.serviceAccount = serviceAccount; + _resultValue.serviceMeshes = serviceMeshes; _resultValue.sessionAffinity = sessionAffinity; _resultValue.timeout = timeout; _resultValue.volumes = volumes; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplateServiceMesh.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplateServiceMesh.java new file mode 100644 index 0000000000..5a70306558 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplateServiceMesh.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudrunv2.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetServiceTemplateServiceMesh { + /** + * @return The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + * + */ + private String mesh; + + private GetServiceTemplateServiceMesh() {} + /** + * @return The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + * + */ + public String mesh() { + return this.mesh; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetServiceTemplateServiceMesh defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String mesh; + public Builder() {} + public Builder(GetServiceTemplateServiceMesh defaults) { + Objects.requireNonNull(defaults); + this.mesh = defaults.mesh; + } + + @CustomType.Setter + public Builder mesh(String mesh) { + if (mesh == null) { + throw new MissingRequiredPropertyException("GetServiceTemplateServiceMesh", "mesh"); + } + this.mesh = mesh; + return this; + } + public GetServiceTemplateServiceMesh build() { + final var _resultValue = new GetServiceTemplateServiceMesh(); + _resultValue.mesh = mesh; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplateVolume.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplateVolume.java index e35ada98ed..17d8032c69 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplateVolume.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/GetServiceTemplateVolume.java @@ -27,7 +27,7 @@ public final class GetServiceTemplateVolume { */ private List emptyDirs; /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. 
* */ private List gcs; @@ -63,7 +63,7 @@ public List emptyDirs() { return this.emptyDirs; } /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * */ public List gcs() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/JobTemplateTemplateVolume.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/JobTemplateTemplateVolume.java index 8bd21380fa..6c9868051d 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/JobTemplateTemplateVolume.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/JobTemplateTemplateVolume.java @@ -30,7 +30,7 @@ public final class JobTemplateTemplateVolume { */ private @Nullable JobTemplateTemplateVolumeEmptyDir emptyDir; /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. * Structure is documented below. * */ @@ -41,7 +41,7 @@ public final class JobTemplateTemplateVolume { */ private String name; /** - * @return NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * @return NFS share mounted as a volume. * Structure is documented below. * */ @@ -71,7 +71,7 @@ public Optional emptyDir() { return Optional.ofNullable(this.emptyDir); } /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. * Structure is documented below. * */ @@ -86,7 +86,7 @@ public String name() { return this.name; } /** - * @return NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * @return NFS share mounted as a volume. * Structure is documented below. * */ diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplate.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplate.java index 248987e450..09c31368cc 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplate.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplate.java @@ -6,6 +6,7 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.gcp.cloudrunv2.outputs.ServiceTemplateContainer; import com.pulumi.gcp.cloudrunv2.outputs.ServiceTemplateScaling; +import com.pulumi.gcp.cloudrunv2.outputs.ServiceTemplateServiceMesh; import com.pulumi.gcp.cloudrunv2.outputs.ServiceTemplateVolume; import com.pulumi.gcp.cloudrunv2.outputs.ServiceTemplateVpcAccess; import java.lang.Boolean; @@ -74,6 +75,12 @@ public final class ServiceTemplate { * */ private @Nullable String serviceAccount; + /** + * @return Enables Cloud Service Mesh for this Revision. + * Structure is documented below. + * + */ + private @Nullable ServiceTemplateServiceMesh serviceMesh; /** * @return Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity * @@ -172,6 +179,14 @@ public Optional scaling() { public Optional serviceAccount() { return Optional.ofNullable(this.serviceAccount); } + /** + * @return Enables Cloud Service Mesh for this Revision. + * Structure is documented below. 
+ * + */ + public Optional serviceMesh() { + return Optional.ofNullable(this.serviceMesh); + } /** * @return Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity * @@ -222,6 +237,7 @@ public static final class Builder { private @Nullable String revision; private @Nullable ServiceTemplateScaling scaling; private @Nullable String serviceAccount; + private @Nullable ServiceTemplateServiceMesh serviceMesh; private @Nullable Boolean sessionAffinity; private @Nullable String timeout; private @Nullable List volumes; @@ -238,6 +254,7 @@ public Builder(ServiceTemplate defaults) { this.revision = defaults.revision; this.scaling = defaults.scaling; this.serviceAccount = defaults.serviceAccount; + this.serviceMesh = defaults.serviceMesh; this.sessionAffinity = defaults.sessionAffinity; this.timeout = defaults.timeout; this.volumes = defaults.volumes; @@ -302,6 +319,12 @@ public Builder serviceAccount(@Nullable String serviceAccount) { return this; } @CustomType.Setter + public Builder serviceMesh(@Nullable ServiceTemplateServiceMesh serviceMesh) { + + this.serviceMesh = serviceMesh; + return this; + } + @CustomType.Setter public Builder sessionAffinity(@Nullable Boolean sessionAffinity) { this.sessionAffinity = sessionAffinity; @@ -339,6 +362,7 @@ public ServiceTemplate build() { _resultValue.revision = revision; _resultValue.scaling = scaling; _resultValue.serviceAccount = serviceAccount; + _resultValue.serviceMesh = serviceMesh; _resultValue.sessionAffinity = sessionAffinity; _resultValue.timeout = timeout; _resultValue.volumes = volumes; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateServiceMesh.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateServiceMesh.java new file mode 100644 index 0000000000..c7cac11def --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateServiceMesh.java @@ -0,0 +1,61 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudrunv2.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ServiceTemplateServiceMesh { + /** + * @return The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + * + * *** + * + */ + private @Nullable String mesh; + + private ServiceTemplateServiceMesh() {} + /** + * @return The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. 
+ * + * *** + * + */ + public Optional mesh() { + return Optional.ofNullable(this.mesh); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ServiceTemplateServiceMesh defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String mesh; + public Builder() {} + public Builder(ServiceTemplateServiceMesh defaults) { + Objects.requireNonNull(defaults); + this.mesh = defaults.mesh; + } + + @CustomType.Setter + public Builder mesh(@Nullable String mesh) { + + this.mesh = mesh; + return this; + } + public ServiceTemplateServiceMesh build() { + final var _resultValue = new ServiceTemplateServiceMesh(); + _resultValue.mesh = mesh; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateVolume.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateVolume.java index 9285e5d93f..0a0135a457 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateVolume.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateVolume.java @@ -30,7 +30,7 @@ public final class ServiceTemplateVolume { */ private @Nullable ServiceTemplateVolumeEmptyDir emptyDir; /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. * */ @@ -71,7 +71,7 @@ public Optional emptyDir() { return Optional.ofNullable(this.emptyDir); } /** - * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * @return Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. 
* */ diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateVolumeNfs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateVolumeNfs.java index 687e823848..ccc85573df 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateVolumeNfs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudrunv2/outputs/ServiceTemplateVolumeNfs.java @@ -21,8 +21,6 @@ public final class ServiceTemplateVolumeNfs { /** * @return If true, mount the NFS volume as read only * - * *** - * */ private @Nullable Boolean readOnly; /** @@ -42,8 +40,6 @@ public String path() { /** * @return If true, mount the NFS volume as read only * - * *** - * */ public Optional readOnly() { return Optional.ofNullable(this.readOnly); diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/Queue.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/Queue.java index 493c1f3fea..6960e198f5 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/Queue.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/Queue.java @@ -11,6 +11,7 @@ import com.pulumi.gcp.cloudtasks.QueueArgs; import com.pulumi.gcp.cloudtasks.inputs.QueueState; import com.pulumi.gcp.cloudtasks.outputs.QueueAppEngineRoutingOverride; +import com.pulumi.gcp.cloudtasks.outputs.QueueHttpTarget; import com.pulumi.gcp.cloudtasks.outputs.QueueRateLimits; import com.pulumi.gcp.cloudtasks.outputs.QueueRetryConfig; import com.pulumi.gcp.cloudtasks.outputs.QueueStackdriverLoggingConfig; @@ -116,6 +117,164 @@ * } * * <!--End PulumiCodeChooser --> + * ### Cloud Tasks Queue Http Target Oidc + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.serviceaccount.Account;
+ * import com.pulumi.gcp.serviceaccount.AccountArgs;
+ * import com.pulumi.gcp.cloudtasks.Queue;
+ * import com.pulumi.gcp.cloudtasks.QueueArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverridePathOverrideArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideQueryOverrideArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetOidcTokenArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetHeaderOverrideArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetHeaderOverrideHeaderArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
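+ *         // Service account the queue uses to mint OIDC tokens; it must live in the same project as the queue,
+ *         // and the caller needs iam.serviceAccounts.actAs permission on it.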
+ *         var oidcServiceAccount = new Account("oidcServiceAccount", AccountArgs.builder()
+ *             .accountId("example-oidc")
+ *             .displayName("Tasks Queue OIDC Service Account")
+ *             .build());
+ * 
+ *         var httpTargetOidc = new Queue("httpTargetOidc", QueueArgs.builder()
+ *             .name("cloud-tasks-queue-http-target-oidc")
+ *             .location("us-central1")
+ *             .httpTarget(QueueHttpTargetArgs.builder()
+ *                 .httpMethod("POST")
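+ *                 // Queue-level URI override applied to every task's HTTP request; with IF_NOT_EXISTS it only
+ *                 // fills in parts the task has not already set (the default enforce mode is ALWAYS).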
+ *                 .uriOverride(QueueHttpTargetUriOverrideArgs.builder()
+ *                     .scheme("HTTPS")
+ *                     .host("oidc.example.com")
+ *                     .port(8443)
+ *                     .pathOverride(QueueHttpTargetUriOverridePathOverrideArgs.builder()
+ *                         .path("/users/1234")
+ *                         .build())
+ *                     .queryOverride(QueueHttpTargetUriOverrideQueryOverrideArgs.builder()
+ *                         .queryParams("qparam1=123&qparam2=456")
+ *                         .build())
+ *                     .uriOverrideEnforceMode("IF_NOT_EXISTS")
+ *                     .build())
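+ *                 // Queue-level headers that override the headers of all tasks in the queue.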
+ *                 .headerOverrides(                
+ *                     QueueHttpTargetHeaderOverrideArgs.builder()
+ *                         .header(QueueHttpTargetHeaderOverrideHeaderArgs.builder()
+ *                             .key("AddSomethingElse")
+ *                             .value("MyOtherValue")
+ *                             .build())
+ *                         .build(),
+ *                     QueueHttpTargetHeaderOverrideArgs.builder()
+ *                         .header(QueueHttpTargetHeaderOverrideHeaderArgs.builder()
+ *                             .key("AddMe")
+ *                             .value("MyValue")
+ *                             .build())
+ *                         .build())
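+ *                 // Attaches an OIDC token as the Authorization header; at queue level both
+ *                 // serviceAccountEmail and audience must be specified.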
+ *                 .oidcToken(QueueHttpTargetOidcTokenArgs.builder()
+ *                     .serviceAccountEmail(oidcServiceAccount.email())
+ *                     .audience("https://oidc.example.com")
+ *                     .build())
+ *                 .build())
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * ### Cloud Tasks Queue Http Target Oauth + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.serviceaccount.Account;
+ * import com.pulumi.gcp.serviceaccount.AccountArgs;
+ * import com.pulumi.gcp.cloudtasks.Queue;
+ * import com.pulumi.gcp.cloudtasks.QueueArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverridePathOverrideArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideQueryOverrideArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetOauthTokenArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetHeaderOverrideArgs;
+ * import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetHeaderOverrideHeaderArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
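+ *         // Service account the queue uses to mint OAuth access tokens; it must live in the same project as the
+ *         // queue, and the caller needs iam.serviceAccounts.actAs permission on it.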
+ *         var oauthServiceAccount = new Account("oauthServiceAccount", AccountArgs.builder()
+ *             .accountId("example-oauth")
+ *             .displayName("Tasks Queue OAuth Service Account")
+ *             .build());
+ * 
+ *         var httpTargetOauth = new Queue("httpTargetOauth", QueueArgs.builder()
+ *             .name("cloud-tasks-queue-http-target-oauth")
+ *             .location("us-central1")
+ *             .httpTarget(QueueHttpTargetArgs.builder()
+ *                 .httpMethod("POST")
+ *                 .uriOverride(QueueHttpTargetUriOverrideArgs.builder()
+ *                     .scheme("HTTPS")
+ *                     .host("oauth.example.com")
+ *                     .port(8443)
+ *                     .pathOverride(QueueHttpTargetUriOverridePathOverrideArgs.builder()
+ *                         .path("/users/1234")
+ *                         .build())
+ *                     .queryOverride(QueueHttpTargetUriOverrideQueryOverrideArgs.builder()
+ *                         .queryParams("qparam1=123&qparam2=456")
+ *                         .build())
+ *                     .uriOverrideEnforceMode("IF_NOT_EXISTS")
+ *                     .build())
+ *                 .headerOverrides(                
+ *                     QueueHttpTargetHeaderOverrideArgs.builder()
+ *                         .header(QueueHttpTargetHeaderOverrideHeaderArgs.builder()
+ *                             .key("AddSomethingElse")
+ *                             .value("MyOtherValue")
+ *                             .build())
+ *                         .build(),
+ *                     QueueHttpTargetHeaderOverrideArgs.builder()
+ *                         .header(QueueHttpTargetHeaderOverrideHeaderArgs.builder()
+ *                             .key("AddMe")
+ *                             .value("MyValue")
+ *                             .build())
+ *                         .build())
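+ *                 // Attaches an OAuth access token as the Authorization header, intended for Google APIs hosted
+ *                 // on *.googleapis.com; at queue level both serviceAccountEmail and scope must be specified.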
+ *                 .oauthToken(QueueHttpTargetOauthTokenArgs.builder()
+ *                     .serviceAccountEmail(oauthServiceAccount.email())
+ *                     .scope("openid https://www.googleapis.com/auth/userinfo.email")
+ *                     .build())
+ *                 .build())
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> * * ## Import * @@ -162,6 +321,22 @@ public class Queue extends com.pulumi.resources.CustomResource { public Output> appEngineRoutingOverride() { return Codegen.optional(this.appEngineRoutingOverride); } + /** + * Modifies HTTP target for HTTP tasks. + * Structure is documented below. + * + */ + @Export(name="httpTarget", refs={QueueHttpTarget.class}, tree="[0]") + private Output httpTarget; + + /** + * @return Modifies HTTP target for HTTP tasks. + * Structure is documented below. + * + */ + public Output> httpTarget() { + return Codegen.optional(this.httpTarget); + } /** * The location of the queue * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/QueueArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/QueueArgs.java index c6a2b1fa9f..a1a68d59b4 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/QueueArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/QueueArgs.java @@ -7,6 +7,7 @@ import com.pulumi.core.annotations.Import; import com.pulumi.exceptions.MissingRequiredPropertyException; import com.pulumi.gcp.cloudtasks.inputs.QueueAppEngineRoutingOverrideArgs; +import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetArgs; import com.pulumi.gcp.cloudtasks.inputs.QueueRateLimitsArgs; import com.pulumi.gcp.cloudtasks.inputs.QueueRetryConfigArgs; import com.pulumi.gcp.cloudtasks.inputs.QueueStackdriverLoggingConfigArgs; @@ -39,6 +40,23 @@ public Optional> appEngineRoutingOverr return Optional.ofNullable(this.appEngineRoutingOverride); } + /** + * Modifies HTTP target for HTTP tasks. + * Structure is documented below. + * + */ + @Import(name="httpTarget") + private @Nullable Output httpTarget; + + /** + * @return Modifies HTTP target for HTTP tasks. + * Structure is documented below. + * + */ + public Optional> httpTarget() { + return Optional.ofNullable(this.httpTarget); + } + /** * The location of the queue * @@ -157,6 +175,7 @@ private QueueArgs() {} private QueueArgs(QueueArgs $) { this.appEngineRoutingOverride = $.appEngineRoutingOverride; + this.httpTarget = $.httpTarget; this.location = $.location; this.name = $.name; this.project = $.project; @@ -208,6 +227,29 @@ public Builder appEngineRoutingOverride(QueueAppEngineRoutingOverrideArgs appEng return appEngineRoutingOverride(Output.of(appEngineRoutingOverride)); } + /** + * @param httpTarget Modifies HTTP target for HTTP tasks. + * Structure is documented below. + * + * @return builder + * + */ + public Builder httpTarget(@Nullable Output httpTarget) { + $.httpTarget = httpTarget; + return this; + } + + /** + * @param httpTarget Modifies HTTP target for HTTP tasks. + * Structure is documented below. + * + * @return builder + * + */ + public Builder httpTarget(QueueHttpTargetArgs httpTarget) { + return httpTarget(Output.of(httpTarget)); + } + /** * @param location The location of the queue * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetArgs.java new file mode 100644 index 0000000000..db1557d661 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetArgs.java @@ -0,0 +1,325 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.cloudtasks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetHeaderOverrideArgs; +import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetOauthTokenArgs; +import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetOidcTokenArgs; +import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideArgs; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class QueueHttpTargetArgs extends com.pulumi.resources.ResourceArgs { + + public static final QueueHttpTargetArgs Empty = new QueueHttpTargetArgs(); + + /** + * HTTP target headers. + * This map contains the header field names and values. + * Headers will be set when running the CreateTask and/or BufferTask. + * These headers represent a subset of the headers that will be configured for the task's HTTP request. + * Some HTTP request headers will be ignored or replaced. + * Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + * Structure is documented below. + * + */ + @Import(name="headerOverrides") + private @Nullable Output> headerOverrides; + + /** + * @return HTTP target headers. + * This map contains the header field names and values. + * Headers will be set when running the CreateTask and/or BufferTask. + * These headers represent a subset of the headers that will be configured for the task's HTTP request. + * Some HTTP request headers will be ignored or replaced. + * Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + * Structure is documented below. + * + */ + public Optional>> headerOverrides() { + return Optional.ofNullable(this.headerOverrides); + } + + /** + * The HTTP method to use for the request. + * When specified, it overrides HttpRequest for the task. + * Note that if the value is set to GET the body of the task will be ignored at execution time. + * Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + * + */ + @Import(name="httpMethod") + private @Nullable Output httpMethod; + + /** + * @return The HTTP method to use for the request. + * When specified, it overrides HttpRequest for the task. + * Note that if the value is set to GET the body of the task will be ignored at execution time. + * Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + * + */ + public Optional> httpMethod() { + return Optional.ofNullable(this.httpMethod); + } + + /** + * If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + * This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + * Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + */ + @Import(name="oauthToken") + private @Nullable Output oauthToken; + + /** + * @return If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. 
+ * This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + * Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + */ + public Optional> oauthToken() { + return Optional.ofNullable(this.oauthToken); + } + + /** + * If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + * This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + * Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + */ + @Import(name="oidcToken") + private @Nullable Output oidcToken; + + /** + * @return If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + * This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + * Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + */ + public Optional> oidcToken() { + return Optional.ofNullable(this.oidcToken); + } + + /** + * URI override. + * When specified, overrides the execution URI for all the tasks in the queue. + * Structure is documented below. + * + */ + @Import(name="uriOverride") + private @Nullable Output uriOverride; + + /** + * @return URI override. + * When specified, overrides the execution URI for all the tasks in the queue. + * Structure is documented below. + * + */ + public Optional> uriOverride() { + return Optional.ofNullable(this.uriOverride); + } + + private QueueHttpTargetArgs() {} + + private QueueHttpTargetArgs(QueueHttpTargetArgs $) { + this.headerOverrides = $.headerOverrides; + this.httpMethod = $.httpMethod; + this.oauthToken = $.oauthToken; + this.oidcToken = $.oidcToken; + this.uriOverride = $.uriOverride; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(QueueHttpTargetArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private QueueHttpTargetArgs $; + + public Builder() { + $ = new QueueHttpTargetArgs(); + } + + public Builder(QueueHttpTargetArgs defaults) { + $ = new QueueHttpTargetArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param headerOverrides HTTP target headers. + * This map contains the header field names and values. + * Headers will be set when running the CreateTask and/or BufferTask. + * These headers represent a subset of the headers that will be configured for the task's HTTP request. + * Some HTTP request headers will be ignored or replaced. + * Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + * Structure is documented below. + * + * @return builder + * + */ + public Builder headerOverrides(@Nullable Output> headerOverrides) { + $.headerOverrides = headerOverrides; + return this; + } + + /** + * @param headerOverrides HTTP target headers. + * This map contains the header field names and values. + * Headers will be set when running the CreateTask and/or BufferTask. 
+ * These headers represent a subset of the headers that will be configured for the task's HTTP request. + * Some HTTP request headers will be ignored or replaced. + * Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + * Structure is documented below. + * + * @return builder + * + */ + public Builder headerOverrides(List headerOverrides) { + return headerOverrides(Output.of(headerOverrides)); + } + + /** + * @param headerOverrides HTTP target headers. + * This map contains the header field names and values. + * Headers will be set when running the CreateTask and/or BufferTask. + * These headers represent a subset of the headers that will be configured for the task's HTTP request. + * Some HTTP request headers will be ignored or replaced. + * Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + * Structure is documented below. + * + * @return builder + * + */ + public Builder headerOverrides(QueueHttpTargetHeaderOverrideArgs... headerOverrides) { + return headerOverrides(List.of(headerOverrides)); + } + + /** + * @param httpMethod The HTTP method to use for the request. + * When specified, it overrides HttpRequest for the task. + * Note that if the value is set to GET the body of the task will be ignored at execution time. + * Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + * + * @return builder + * + */ + public Builder httpMethod(@Nullable Output httpMethod) { + $.httpMethod = httpMethod; + return this; + } + + /** + * @param httpMethod The HTTP method to use for the request. + * When specified, it overrides HttpRequest for the task. + * Note that if the value is set to GET the body of the task will be ignored at execution time. + * Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + * + * @return builder + * + */ + public Builder httpMethod(String httpMethod) { + return httpMethod(Output.of(httpMethod)); + } + + /** + * @param oauthToken If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + * This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + * Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + * @return builder + * + */ + public Builder oauthToken(@Nullable Output oauthToken) { + $.oauthToken = oauthToken; + return this; + } + + /** + * @param oauthToken If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + * This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + * Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + * Structure is documented below. 
+ * + * @return builder + * + */ + public Builder oauthToken(QueueHttpTargetOauthTokenArgs oauthToken) { + return oauthToken(Output.of(oauthToken)); + } + + /** + * @param oidcToken If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + * This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + * Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + * @return builder + * + */ + public Builder oidcToken(@Nullable Output oidcToken) { + $.oidcToken = oidcToken; + return this; + } + + /** + * @param oidcToken If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + * This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + * Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + * @return builder + * + */ + public Builder oidcToken(QueueHttpTargetOidcTokenArgs oidcToken) { + return oidcToken(Output.of(oidcToken)); + } + + /** + * @param uriOverride URI override. + * When specified, overrides the execution URI for all the tasks in the queue. + * Structure is documented below. + * + * @return builder + * + */ + public Builder uriOverride(@Nullable Output uriOverride) { + $.uriOverride = uriOverride; + return this; + } + + /** + * @param uriOverride URI override. + * When specified, overrides the execution URI for all the tasks in the queue. + * Structure is documented below. + * + * @return builder + * + */ + public Builder uriOverride(QueueHttpTargetUriOverrideArgs uriOverride) { + return uriOverride(Output.of(uriOverride)); + } + + public QueueHttpTargetArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetHeaderOverrideArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetHeaderOverrideArgs.java new file mode 100644 index 0000000000..91a17e0faf --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetHeaderOverrideArgs.java @@ -0,0 +1,89 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetHeaderOverrideHeaderArgs; +import java.util.Objects; + + +public final class QueueHttpTargetHeaderOverrideArgs extends com.pulumi.resources.ResourceArgs { + + public static final QueueHttpTargetHeaderOverrideArgs Empty = new QueueHttpTargetHeaderOverrideArgs(); + + /** + * Header embodying a key and a value. + * Structure is documented below. + * + */ + @Import(name="header", required=true) + private Output header; + + /** + * @return Header embodying a key and a value. + * Structure is documented below. 
+ * + */ + public Output header() { + return this.header; + } + + private QueueHttpTargetHeaderOverrideArgs() {} + + private QueueHttpTargetHeaderOverrideArgs(QueueHttpTargetHeaderOverrideArgs $) { + this.header = $.header; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(QueueHttpTargetHeaderOverrideArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private QueueHttpTargetHeaderOverrideArgs $; + + public Builder() { + $ = new QueueHttpTargetHeaderOverrideArgs(); + } + + public Builder(QueueHttpTargetHeaderOverrideArgs defaults) { + $ = new QueueHttpTargetHeaderOverrideArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param header Header embodying a key and a value. + * Structure is documented below. + * + * @return builder + * + */ + public Builder header(Output header) { + $.header = header; + return this; + } + + /** + * @param header Header embodying a key and a value. + * Structure is documented below. + * + * @return builder + * + */ + public Builder header(QueueHttpTargetHeaderOverrideHeaderArgs header) { + return header(Output.of(header)); + } + + public QueueHttpTargetHeaderOverrideArgs build() { + if ($.header == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetHeaderOverrideArgs", "header"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetHeaderOverrideHeaderArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetHeaderOverrideHeaderArgs.java new file mode 100644 index 0000000000..be9281d5c4 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetHeaderOverrideHeaderArgs.java @@ -0,0 +1,125 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class QueueHttpTargetHeaderOverrideHeaderArgs extends com.pulumi.resources.ResourceArgs { + + public static final QueueHttpTargetHeaderOverrideHeaderArgs Empty = new QueueHttpTargetHeaderOverrideHeaderArgs(); + + /** + * The Key of the header. + * + */ + @Import(name="key", required=true) + private Output key; + + /** + * @return The Key of the header. + * + */ + public Output key() { + return this.key; + } + + /** + * The Value of the header. + * + */ + @Import(name="value", required=true) + private Output value; + + /** + * @return The Value of the header. 
+ * + */ + public Output value() { + return this.value; + } + + private QueueHttpTargetHeaderOverrideHeaderArgs() {} + + private QueueHttpTargetHeaderOverrideHeaderArgs(QueueHttpTargetHeaderOverrideHeaderArgs $) { + this.key = $.key; + this.value = $.value; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(QueueHttpTargetHeaderOverrideHeaderArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private QueueHttpTargetHeaderOverrideHeaderArgs $; + + public Builder() { + $ = new QueueHttpTargetHeaderOverrideHeaderArgs(); + } + + public Builder(QueueHttpTargetHeaderOverrideHeaderArgs defaults) { + $ = new QueueHttpTargetHeaderOverrideHeaderArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param key The Key of the header. + * + * @return builder + * + */ + public Builder key(Output key) { + $.key = key; + return this; + } + + /** + * @param key The Key of the header. + * + * @return builder + * + */ + public Builder key(String key) { + return key(Output.of(key)); + } + + /** + * @param value The Value of the header. + * + * @return builder + * + */ + public Builder value(Output value) { + $.value = value; + return this; + } + + /** + * @param value The Value of the header. + * + * @return builder + * + */ + public Builder value(String value) { + return value(Output.of(value)); + } + + public QueueHttpTargetHeaderOverrideHeaderArgs build() { + if ($.key == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetHeaderOverrideHeaderArgs", "key"); + } + if ($.value == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetHeaderOverrideHeaderArgs", "value"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetOauthTokenArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetOauthTokenArgs.java new file mode 100644 index 0000000000..cef0ba86e4 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetOauthTokenArgs.java @@ -0,0 +1,136 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class QueueHttpTargetOauthTokenArgs extends com.pulumi.resources.ResourceArgs { + + public static final QueueHttpTargetOauthTokenArgs Empty = new QueueHttpTargetOauthTokenArgs(); + + /** + * OAuth scope to be used for generating OAuth access token. + * If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + * + */ + @Import(name="scope") + private @Nullable Output scope; + + /** + * @return OAuth scope to be used for generating OAuth access token. + * If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + * + */ + public Optional> scope() { + return Optional.ofNullable(this.scope); + } + + /** + * Service account email to be used for generating OAuth token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. 
+ * + */ + @Import(name="serviceAccountEmail", required=true) + private Output serviceAccountEmail; + + /** + * @return Service account email to be used for generating OAuth token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + * + */ + public Output serviceAccountEmail() { + return this.serviceAccountEmail; + } + + private QueueHttpTargetOauthTokenArgs() {} + + private QueueHttpTargetOauthTokenArgs(QueueHttpTargetOauthTokenArgs $) { + this.scope = $.scope; + this.serviceAccountEmail = $.serviceAccountEmail; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(QueueHttpTargetOauthTokenArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private QueueHttpTargetOauthTokenArgs $; + + public Builder() { + $ = new QueueHttpTargetOauthTokenArgs(); + } + + public Builder(QueueHttpTargetOauthTokenArgs defaults) { + $ = new QueueHttpTargetOauthTokenArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param scope OAuth scope to be used for generating OAuth access token. + * If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + * + * @return builder + * + */ + public Builder scope(@Nullable Output scope) { + $.scope = scope; + return this; + } + + /** + * @param scope OAuth scope to be used for generating OAuth access token. + * If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + * + * @return builder + * + */ + public Builder scope(String scope) { + return scope(Output.of(scope)); + } + + /** + * @param serviceAccountEmail Service account email to be used for generating OAuth token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + * + * @return builder + * + */ + public Builder serviceAccountEmail(Output serviceAccountEmail) { + $.serviceAccountEmail = serviceAccountEmail; + return this; + } + + /** + * @param serviceAccountEmail Service account email to be used for generating OAuth token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + * + * @return builder + * + */ + public Builder serviceAccountEmail(String serviceAccountEmail) { + return serviceAccountEmail(Output.of(serviceAccountEmail)); + } + + public QueueHttpTargetOauthTokenArgs build() { + if ($.serviceAccountEmail == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetOauthTokenArgs", "serviceAccountEmail"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetOidcTokenArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetOidcTokenArgs.java new file mode 100644 index 0000000000..ddce654542 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetOidcTokenArgs.java @@ -0,0 +1,132 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.cloudtasks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class QueueHttpTargetOidcTokenArgs extends com.pulumi.resources.ResourceArgs { + + public static final QueueHttpTargetOidcTokenArgs Empty = new QueueHttpTargetOidcTokenArgs(); + + /** + * Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + * + */ + @Import(name="audience") + private @Nullable Output audience; + + /** + * @return Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + * + */ + public Optional> audience() { + return Optional.ofNullable(this.audience); + } + + /** + * Service account email to be used for generating OIDC token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + * + */ + @Import(name="serviceAccountEmail", required=true) + private Output serviceAccountEmail; + + /** + * @return Service account email to be used for generating OIDC token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + * + */ + public Output serviceAccountEmail() { + return this.serviceAccountEmail; + } + + private QueueHttpTargetOidcTokenArgs() {} + + private QueueHttpTargetOidcTokenArgs(QueueHttpTargetOidcTokenArgs $) { + this.audience = $.audience; + this.serviceAccountEmail = $.serviceAccountEmail; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(QueueHttpTargetOidcTokenArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private QueueHttpTargetOidcTokenArgs $; + + public Builder() { + $ = new QueueHttpTargetOidcTokenArgs(); + } + + public Builder(QueueHttpTargetOidcTokenArgs defaults) { + $ = new QueueHttpTargetOidcTokenArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param audience Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + * + * @return builder + * + */ + public Builder audience(@Nullable Output audience) { + $.audience = audience; + return this; + } + + /** + * @param audience Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + * + * @return builder + * + */ + public Builder audience(String audience) { + return audience(Output.of(audience)); + } + + /** + * @param serviceAccountEmail Service account email to be used for generating OIDC token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + * + * @return builder + * + */ + public Builder serviceAccountEmail(Output serviceAccountEmail) { + $.serviceAccountEmail = serviceAccountEmail; + return this; + } + + /** + * @param serviceAccountEmail Service account email to be used for generating OIDC token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. 
+ * + * @return builder + * + */ + public Builder serviceAccountEmail(String serviceAccountEmail) { + return serviceAccountEmail(Output.of(serviceAccountEmail)); + } + + public QueueHttpTargetOidcTokenArgs build() { + if ($.serviceAccountEmail == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetOidcTokenArgs", "serviceAccountEmail"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverrideArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverrideArgs.java new file mode 100644 index 0000000000..5a0fb0f450 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverrideArgs.java @@ -0,0 +1,338 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverridePathOverrideArgs; +import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetUriOverrideQueryOverrideArgs; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class QueueHttpTargetUriOverrideArgs extends com.pulumi.resources.ResourceArgs { + + public static final QueueHttpTargetUriOverrideArgs Empty = new QueueHttpTargetUriOverrideArgs(); + + /** + * Host override. + * When specified, replaces the host part of the task URL. + * For example, if the task URL is "https://www.google.com", and host value + * is set to "example.net", the overridden URI will be changed to "https://example.net". + * Host value cannot be an empty string (INVALID_ARGUMENT). + * + */ + @Import(name="host") + private @Nullable Output host; + + /** + * @return Host override. + * When specified, replaces the host part of the task URL. + * For example, if the task URL is "https://www.google.com", and host value + * is set to "example.net", the overridden URI will be changed to "https://example.net". + * Host value cannot be an empty string (INVALID_ARGUMENT). + * + */ + public Optional> host() { + return Optional.ofNullable(this.host); + } + + /** + * URI path. + * When specified, replaces the existing path of the task URL. + * Setting the path value to an empty string clears the URI path segment. + * Structure is documented below. + * + */ + @Import(name="pathOverride") + private @Nullable Output pathOverride; + + /** + * @return URI path. + * When specified, replaces the existing path of the task URL. + * Setting the path value to an empty string clears the URI path segment. + * Structure is documented below. + * + */ + public Optional> pathOverride() { + return Optional.ofNullable(this.pathOverride); + } + + /** + * Port override. + * When specified, replaces the port part of the task URI. + * For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + * Note that the port value must be a positive integer. + * Setting the port to 0 (Zero) clears the URI port. + * + */ + @Import(name="port") + private @Nullable Output port; + + /** + * @return Port override. + * When specified, replaces the port part of the task URI. + * For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + * Note that the port value must be a positive integer. 
+ * Setting the port to 0 (Zero) clears the URI port. + * + */ + public Optional> port() { + return Optional.ofNullable(this.port); + } + + /** + * URI query. + * When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + * Structure is documented below. + * + */ + @Import(name="queryOverride") + private @Nullable Output queryOverride; + + /** + * @return URI query. + * When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + * Structure is documented below. + * + */ + public Optional> queryOverride() { + return Optional.ofNullable(this.queryOverride); + } + + /** + * Scheme override. + * When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + * Possible values are: `HTTP`, `HTTPS`. + * + */ + @Import(name="scheme") + private @Nullable Output scheme; + + /** + * @return Scheme override. + * When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + * Possible values are: `HTTP`, `HTTPS`. + * + */ + public Optional> scheme() { + return Optional.ofNullable(this.scheme); + } + + /** + * URI Override Enforce Mode + * When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + * Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + * + */ + @Import(name="uriOverrideEnforceMode") + private @Nullable Output uriOverrideEnforceMode; + + /** + * @return URI Override Enforce Mode + * When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + * Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + * + */ + public Optional> uriOverrideEnforceMode() { + return Optional.ofNullable(this.uriOverrideEnforceMode); + } + + private QueueHttpTargetUriOverrideArgs() {} + + private QueueHttpTargetUriOverrideArgs(QueueHttpTargetUriOverrideArgs $) { + this.host = $.host; + this.pathOverride = $.pathOverride; + this.port = $.port; + this.queryOverride = $.queryOverride; + this.scheme = $.scheme; + this.uriOverrideEnforceMode = $.uriOverrideEnforceMode; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(QueueHttpTargetUriOverrideArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private QueueHttpTargetUriOverrideArgs $; + + public Builder() { + $ = new QueueHttpTargetUriOverrideArgs(); + } + + public Builder(QueueHttpTargetUriOverrideArgs defaults) { + $ = new QueueHttpTargetUriOverrideArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param host Host override. + * When specified, replaces the host part of the task URL. + * For example, if the task URL is "https://www.google.com", and host value + * is set to "example.net", the overridden URI will be changed to "https://example.net". + * Host value cannot be an empty string (INVALID_ARGUMENT). + * + * @return builder + * + */ + public Builder host(@Nullable Output host) { + $.host = host; + return this; + } + + /** + * @param host Host override. + * When specified, replaces the host part of the task URL. + * For example, if the task URL is "https://www.google.com", and host value + * is set to "example.net", the overridden URI will be changed to "https://example.net". + * Host value cannot be an empty string (INVALID_ARGUMENT). + * + * @return builder + * + */ + public Builder host(String host) { + return host(Output.of(host)); + } + + /** + * @param pathOverride URI path. 
+ * When specified, replaces the existing path of the task URL. + * Setting the path value to an empty string clears the URI path segment. + * Structure is documented below. + * + * @return builder + * + */ + public Builder pathOverride(@Nullable Output pathOverride) { + $.pathOverride = pathOverride; + return this; + } + + /** + * @param pathOverride URI path. + * When specified, replaces the existing path of the task URL. + * Setting the path value to an empty string clears the URI path segment. + * Structure is documented below. + * + * @return builder + * + */ + public Builder pathOverride(QueueHttpTargetUriOverridePathOverrideArgs pathOverride) { + return pathOverride(Output.of(pathOverride)); + } + + /** + * @param port Port override. + * When specified, replaces the port part of the task URI. + * For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + * Note that the port value must be a positive integer. + * Setting the port to 0 (Zero) clears the URI port. + * + * @return builder + * + */ + public Builder port(@Nullable Output port) { + $.port = port; + return this; + } + + /** + * @param port Port override. + * When specified, replaces the port part of the task URI. + * For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + * Note that the port value must be a positive integer. + * Setting the port to 0 (Zero) clears the URI port. + * + * @return builder + * + */ + public Builder port(String port) { + return port(Output.of(port)); + } + + /** + * @param queryOverride URI query. + * When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + * Structure is documented below. + * + * @return builder + * + */ + public Builder queryOverride(@Nullable Output queryOverride) { + $.queryOverride = queryOverride; + return this; + } + + /** + * @param queryOverride URI query. + * When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + * Structure is documented below. + * + * @return builder + * + */ + public Builder queryOverride(QueueHttpTargetUriOverrideQueryOverrideArgs queryOverride) { + return queryOverride(Output.of(queryOverride)); + } + + /** + * @param scheme Scheme override. + * When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + * Possible values are: `HTTP`, `HTTPS`. + * + * @return builder + * + */ + public Builder scheme(@Nullable Output scheme) { + $.scheme = scheme; + return this; + } + + /** + * @param scheme Scheme override. + * When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + * Possible values are: `HTTP`, `HTTPS`. + * + * @return builder + * + */ + public Builder scheme(String scheme) { + return scheme(Output.of(scheme)); + } + + /** + * @param uriOverrideEnforceMode URI Override Enforce Mode + * When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + * Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + * + * @return builder + * + */ + public Builder uriOverrideEnforceMode(@Nullable Output uriOverrideEnforceMode) { + $.uriOverrideEnforceMode = uriOverrideEnforceMode; + return this; + } + + /** + * @param uriOverrideEnforceMode URI Override Enforce Mode + * When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. 
+ * Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + * + * @return builder + * + */ + public Builder uriOverrideEnforceMode(String uriOverrideEnforceMode) { + return uriOverrideEnforceMode(Output.of(uriOverrideEnforceMode)); + } + + public QueueHttpTargetUriOverrideArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverridePathOverrideArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverridePathOverrideArgs.java new file mode 100644 index 0000000000..e01ab2076d --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverridePathOverrideArgs.java @@ -0,0 +1,83 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class QueueHttpTargetUriOverridePathOverrideArgs extends com.pulumi.resources.ResourceArgs { + + public static final QueueHttpTargetUriOverridePathOverrideArgs Empty = new QueueHttpTargetUriOverridePathOverrideArgs(); + + /** + * The URI path (e.g., /users/1234). Default is an empty string. + * + */ + @Import(name="path") + private @Nullable Output path; + + /** + * @return The URI path (e.g., /users/1234). Default is an empty string. + * + */ + public Optional> path() { + return Optional.ofNullable(this.path); + } + + private QueueHttpTargetUriOverridePathOverrideArgs() {} + + private QueueHttpTargetUriOverridePathOverrideArgs(QueueHttpTargetUriOverridePathOverrideArgs $) { + this.path = $.path; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(QueueHttpTargetUriOverridePathOverrideArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private QueueHttpTargetUriOverridePathOverrideArgs $; + + public Builder() { + $ = new QueueHttpTargetUriOverridePathOverrideArgs(); + } + + public Builder(QueueHttpTargetUriOverridePathOverrideArgs defaults) { + $ = new QueueHttpTargetUriOverridePathOverrideArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param path The URI path (e.g., /users/1234). Default is an empty string. + * + * @return builder + * + */ + public Builder path(@Nullable Output path) { + $.path = path; + return this; + } + + /** + * @param path The URI path (e.g., /users/1234). Default is an empty string. + * + * @return builder + * + */ + public Builder path(String path) { + return path(Output.of(path)); + } + + public QueueHttpTargetUriOverridePathOverrideArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverrideQueryOverrideArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverrideQueryOverrideArgs.java new file mode 100644 index 0000000000..09244e4293 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueHttpTargetUriOverrideQueryOverrideArgs.java @@ -0,0 +1,83 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.cloudtasks.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class QueueHttpTargetUriOverrideQueryOverrideArgs extends com.pulumi.resources.ResourceArgs { + + public static final QueueHttpTargetUriOverrideQueryOverrideArgs Empty = new QueueHttpTargetUriOverrideQueryOverrideArgs(); + + /** + * The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + * + */ + @Import(name="queryParams") + private @Nullable Output queryParams; + + /** + * @return The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + * + */ + public Optional> queryParams() { + return Optional.ofNullable(this.queryParams); + } + + private QueueHttpTargetUriOverrideQueryOverrideArgs() {} + + private QueueHttpTargetUriOverrideQueryOverrideArgs(QueueHttpTargetUriOverrideQueryOverrideArgs $) { + this.queryParams = $.queryParams; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(QueueHttpTargetUriOverrideQueryOverrideArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private QueueHttpTargetUriOverrideQueryOverrideArgs $; + + public Builder() { + $ = new QueueHttpTargetUriOverrideQueryOverrideArgs(); + } + + public Builder(QueueHttpTargetUriOverrideQueryOverrideArgs defaults) { + $ = new QueueHttpTargetUriOverrideQueryOverrideArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param queryParams The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + * + * @return builder + * + */ + public Builder queryParams(@Nullable Output queryParams) { + $.queryParams = queryParams; + return this; + } + + /** + * @param queryParams The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + * + * @return builder + * + */ + public Builder queryParams(String queryParams) { + return queryParams(Output.of(queryParams)); + } + + public QueueHttpTargetUriOverrideQueryOverrideArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueState.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueState.java index 2b89bb5f81..b74b53e9d2 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/inputs/QueueState.java @@ -6,6 +6,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.gcp.cloudtasks.inputs.QueueAppEngineRoutingOverrideArgs; +import com.pulumi.gcp.cloudtasks.inputs.QueueHttpTargetArgs; import com.pulumi.gcp.cloudtasks.inputs.QueueRateLimitsArgs; import com.pulumi.gcp.cloudtasks.inputs.QueueRetryConfigArgs; import com.pulumi.gcp.cloudtasks.inputs.QueueStackdriverLoggingConfigArgs; @@ -38,6 +39,23 @@ public Optional> appEngineRoutingOverr return Optional.ofNullable(this.appEngineRoutingOverride); } + /** + * Modifies HTTP target for HTTP tasks. + * Structure is documented below. + * + */ + @Import(name="httpTarget") + private @Nullable Output httpTarget; + + /** + * @return Modifies HTTP target for HTTP tasks. + * Structure is documented below. 
+ * + */ + public Optional> httpTarget() { + return Optional.ofNullable(this.httpTarget); + } + /** * The location of the queue * @@ -156,6 +174,7 @@ private QueueState() {} private QueueState(QueueState $) { this.appEngineRoutingOverride = $.appEngineRoutingOverride; + this.httpTarget = $.httpTarget; this.location = $.location; this.name = $.name; this.project = $.project; @@ -207,6 +226,29 @@ public Builder appEngineRoutingOverride(QueueAppEngineRoutingOverrideArgs appEng return appEngineRoutingOverride(Output.of(appEngineRoutingOverride)); } + /** + * @param httpTarget Modifies HTTP target for HTTP tasks. + * Structure is documented below. + * + * @return builder + * + */ + public Builder httpTarget(@Nullable Output httpTarget) { + $.httpTarget = httpTarget; + return this; + } + + /** + * @param httpTarget Modifies HTTP target for HTTP tasks. + * Structure is documented below. + * + * @return builder + * + */ + public Builder httpTarget(QueueHttpTargetArgs httpTarget) { + return httpTarget(Output.of(httpTarget)); + } + /** * @param location The location of the queue * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTarget.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTarget.java new file mode 100644 index 0000000000..0866ddf13d --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTarget.java @@ -0,0 +1,185 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.gcp.cloudtasks.outputs.QueueHttpTargetHeaderOverride; +import com.pulumi.gcp.cloudtasks.outputs.QueueHttpTargetOauthToken; +import com.pulumi.gcp.cloudtasks.outputs.QueueHttpTargetOidcToken; +import com.pulumi.gcp.cloudtasks.outputs.QueueHttpTargetUriOverride; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class QueueHttpTarget { + /** + * @return HTTP target headers. + * This map contains the header field names and values. + * Headers will be set when running the CreateTask and/or BufferTask. + * These headers represent a subset of the headers that will be configured for the task's HTTP request. + * Some HTTP request headers will be ignored or replaced. + * Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + * Structure is documented below. + * + */ + private @Nullable List headerOverrides; + /** + * @return The HTTP method to use for the request. + * When specified, it overrides HttpRequest for the task. + * Note that if the value is set to GET the body of the task will be ignored at execution time. + * Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + * + */ + private @Nullable String httpMethod; + /** + * @return If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + * This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + * Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. 
+ * Structure is documented below. + * + */ + private @Nullable QueueHttpTargetOauthToken oauthToken; + /** + * @return If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + * This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + * Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + */ + private @Nullable QueueHttpTargetOidcToken oidcToken; + /** + * @return URI override. + * When specified, overrides the execution URI for all the tasks in the queue. + * Structure is documented below. + * + */ + private @Nullable QueueHttpTargetUriOverride uriOverride; + + private QueueHttpTarget() {} + /** + * @return HTTP target headers. + * This map contains the header field names and values. + * Headers will be set when running the CreateTask and/or BufferTask. + * These headers represent a subset of the headers that will be configured for the task's HTTP request. + * Some HTTP request headers will be ignored or replaced. + * Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + * Structure is documented below. + * + */ + public List headerOverrides() { + return this.headerOverrides == null ? List.of() : this.headerOverrides; + } + /** + * @return The HTTP method to use for the request. + * When specified, it overrides HttpRequest for the task. + * Note that if the value is set to GET the body of the task will be ignored at execution time. + * Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + * + */ + public Optional httpMethod() { + return Optional.ofNullable(this.httpMethod); + } + /** + * @return If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + * This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + * Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + */ + public Optional oauthToken() { + return Optional.ofNullable(this.oauthToken); + } + /** + * @return If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + * This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + * Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + * Structure is documented below. + * + */ + public Optional oidcToken() { + return Optional.ofNullable(this.oidcToken); + } + /** + * @return URI override. + * When specified, overrides the execution URI for all the tasks in the queue. + * Structure is documented below. 
+ * + */ + public Optional uriOverride() { + return Optional.ofNullable(this.uriOverride); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(QueueHttpTarget defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable List headerOverrides; + private @Nullable String httpMethod; + private @Nullable QueueHttpTargetOauthToken oauthToken; + private @Nullable QueueHttpTargetOidcToken oidcToken; + private @Nullable QueueHttpTargetUriOverride uriOverride; + public Builder() {} + public Builder(QueueHttpTarget defaults) { + Objects.requireNonNull(defaults); + this.headerOverrides = defaults.headerOverrides; + this.httpMethod = defaults.httpMethod; + this.oauthToken = defaults.oauthToken; + this.oidcToken = defaults.oidcToken; + this.uriOverride = defaults.uriOverride; + } + + @CustomType.Setter + public Builder headerOverrides(@Nullable List headerOverrides) { + + this.headerOverrides = headerOverrides; + return this; + } + public Builder headerOverrides(QueueHttpTargetHeaderOverride... headerOverrides) { + return headerOverrides(List.of(headerOverrides)); + } + @CustomType.Setter + public Builder httpMethod(@Nullable String httpMethod) { + + this.httpMethod = httpMethod; + return this; + } + @CustomType.Setter + public Builder oauthToken(@Nullable QueueHttpTargetOauthToken oauthToken) { + + this.oauthToken = oauthToken; + return this; + } + @CustomType.Setter + public Builder oidcToken(@Nullable QueueHttpTargetOidcToken oidcToken) { + + this.oidcToken = oidcToken; + return this; + } + @CustomType.Setter + public Builder uriOverride(@Nullable QueueHttpTargetUriOverride uriOverride) { + + this.uriOverride = uriOverride; + return this; + } + public QueueHttpTarget build() { + final var _resultValue = new QueueHttpTarget(); + _resultValue.headerOverrides = headerOverrides; + _resultValue.httpMethod = httpMethod; + _resultValue.oauthToken = oauthToken; + _resultValue.oidcToken = oidcToken; + _resultValue.uriOverride = uriOverride; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetHeaderOverride.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetHeaderOverride.java new file mode 100644 index 0000000000..d0b12b0546 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetHeaderOverride.java @@ -0,0 +1,60 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.cloudtasks.outputs.QueueHttpTargetHeaderOverrideHeader; +import java.util.Objects; + +@CustomType +public final class QueueHttpTargetHeaderOverride { + /** + * @return Header embodying a key and a value. + * Structure is documented below. + * + */ + private QueueHttpTargetHeaderOverrideHeader header; + + private QueueHttpTargetHeaderOverride() {} + /** + * @return Header embodying a key and a value. + * Structure is documented below. 
+ * + */ + public QueueHttpTargetHeaderOverrideHeader header() { + return this.header; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(QueueHttpTargetHeaderOverride defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private QueueHttpTargetHeaderOverrideHeader header; + public Builder() {} + public Builder(QueueHttpTargetHeaderOverride defaults) { + Objects.requireNonNull(defaults); + this.header = defaults.header; + } + + @CustomType.Setter + public Builder header(QueueHttpTargetHeaderOverrideHeader header) { + if (header == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetHeaderOverride", "header"); + } + this.header = header; + return this; + } + public QueueHttpTargetHeaderOverride build() { + final var _resultValue = new QueueHttpTargetHeaderOverride(); + _resultValue.header = header; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetHeaderOverrideHeader.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetHeaderOverrideHeader.java new file mode 100644 index 0000000000..b1f54d72d6 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetHeaderOverrideHeader.java @@ -0,0 +1,81 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class QueueHttpTargetHeaderOverrideHeader { + /** + * @return The Key of the header. + * + */ + private String key; + /** + * @return The Value of the header. + * + */ + private String value; + + private QueueHttpTargetHeaderOverrideHeader() {} + /** + * @return The Key of the header. + * + */ + public String key() { + return this.key; + } + /** + * @return The Value of the header. 
+ * + */ + public String value() { + return this.value; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(QueueHttpTargetHeaderOverrideHeader defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String key; + private String value; + public Builder() {} + public Builder(QueueHttpTargetHeaderOverrideHeader defaults) { + Objects.requireNonNull(defaults); + this.key = defaults.key; + this.value = defaults.value; + } + + @CustomType.Setter + public Builder key(String key) { + if (key == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetHeaderOverrideHeader", "key"); + } + this.key = key; + return this; + } + @CustomType.Setter + public Builder value(String value) { + if (value == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetHeaderOverrideHeader", "value"); + } + this.value = value; + return this; + } + public QueueHttpTargetHeaderOverrideHeader build() { + final var _resultValue = new QueueHttpTargetHeaderOverrideHeader(); + _resultValue.key = key; + _resultValue.value = value; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetOauthToken.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetOauthToken.java new file mode 100644 index 0000000000..a96ba63e37 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetOauthToken.java @@ -0,0 +1,87 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class QueueHttpTargetOauthToken { + /** + * @return OAuth scope to be used for generating OAuth access token. + * If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + * + */ + private @Nullable String scope; + /** + * @return Service account email to be used for generating OAuth token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + * + */ + private String serviceAccountEmail; + + private QueueHttpTargetOauthToken() {} + /** + * @return OAuth scope to be used for generating OAuth access token. + * If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + * + */ + public Optional scope() { + return Optional.ofNullable(this.scope); + } + /** + * @return Service account email to be used for generating OAuth token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. 
+ * + */ + public String serviceAccountEmail() { + return this.serviceAccountEmail; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(QueueHttpTargetOauthToken defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String scope; + private String serviceAccountEmail; + public Builder() {} + public Builder(QueueHttpTargetOauthToken defaults) { + Objects.requireNonNull(defaults); + this.scope = defaults.scope; + this.serviceAccountEmail = defaults.serviceAccountEmail; + } + + @CustomType.Setter + public Builder scope(@Nullable String scope) { + + this.scope = scope; + return this; + } + @CustomType.Setter + public Builder serviceAccountEmail(String serviceAccountEmail) { + if (serviceAccountEmail == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetOauthToken", "serviceAccountEmail"); + } + this.serviceAccountEmail = serviceAccountEmail; + return this; + } + public QueueHttpTargetOauthToken build() { + final var _resultValue = new QueueHttpTargetOauthToken(); + _resultValue.scope = scope; + _resultValue.serviceAccountEmail = serviceAccountEmail; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetOidcToken.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetOidcToken.java new file mode 100644 index 0000000000..ecfdedc1bc --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetOidcToken.java @@ -0,0 +1,85 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class QueueHttpTargetOidcToken { + /** + * @return Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + * + */ + private @Nullable String audience; + /** + * @return Service account email to be used for generating OIDC token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + * + */ + private String serviceAccountEmail; + + private QueueHttpTargetOidcToken() {} + /** + * @return Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + * + */ + public Optional audience() { + return Optional.ofNullable(this.audience); + } + /** + * @return Service account email to be used for generating OIDC token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. 
+ * + */ + public String serviceAccountEmail() { + return this.serviceAccountEmail; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(QueueHttpTargetOidcToken defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String audience; + private String serviceAccountEmail; + public Builder() {} + public Builder(QueueHttpTargetOidcToken defaults) { + Objects.requireNonNull(defaults); + this.audience = defaults.audience; + this.serviceAccountEmail = defaults.serviceAccountEmail; + } + + @CustomType.Setter + public Builder audience(@Nullable String audience) { + + this.audience = audience; + return this; + } + @CustomType.Setter + public Builder serviceAccountEmail(String serviceAccountEmail) { + if (serviceAccountEmail == null) { + throw new MissingRequiredPropertyException("QueueHttpTargetOidcToken", "serviceAccountEmail"); + } + this.serviceAccountEmail = serviceAccountEmail; + return this; + } + public QueueHttpTargetOidcToken build() { + final var _resultValue = new QueueHttpTargetOidcToken(); + _resultValue.audience = audience; + _resultValue.serviceAccountEmail = serviceAccountEmail; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverride.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverride.java new file mode 100644 index 0000000000..03900edcb1 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverride.java @@ -0,0 +1,198 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.gcp.cloudtasks.outputs.QueueHttpTargetUriOverridePathOverride; +import com.pulumi.gcp.cloudtasks.outputs.QueueHttpTargetUriOverrideQueryOverride; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class QueueHttpTargetUriOverride { + /** + * @return Host override. + * When specified, replaces the host part of the task URL. + * For example, if the task URL is "https://www.google.com", and host value + * is set to "example.net", the overridden URI will be changed to "https://example.net". + * Host value cannot be an empty string (INVALID_ARGUMENT). + * + */ + private @Nullable String host; + /** + * @return URI path. + * When specified, replaces the existing path of the task URL. + * Setting the path value to an empty string clears the URI path segment. + * Structure is documented below. + * + */ + private @Nullable QueueHttpTargetUriOverridePathOverride pathOverride; + /** + * @return Port override. + * When specified, replaces the port part of the task URI. + * For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + * Note that the port value must be a positive integer. + * Setting the port to 0 (Zero) clears the URI port. + * + */ + private @Nullable String port; + /** + * @return URI query. + * When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + * Structure is documented below. 
+ * + */ + private @Nullable QueueHttpTargetUriOverrideQueryOverride queryOverride; + /** + * @return Scheme override. + * When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + * Possible values are: `HTTP`, `HTTPS`. + * + */ + private @Nullable String scheme; + /** + * @return URI Override Enforce Mode + * When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + * Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + * + */ + private @Nullable String uriOverrideEnforceMode; + + private QueueHttpTargetUriOverride() {} + /** + * @return Host override. + * When specified, replaces the host part of the task URL. + * For example, if the task URL is "https://www.google.com", and host value + * is set to "example.net", the overridden URI will be changed to "https://example.net". + * Host value cannot be an empty string (INVALID_ARGUMENT). + * + */ + public Optional host() { + return Optional.ofNullable(this.host); + } + /** + * @return URI path. + * When specified, replaces the existing path of the task URL. + * Setting the path value to an empty string clears the URI path segment. + * Structure is documented below. + * + */ + public Optional pathOverride() { + return Optional.ofNullable(this.pathOverride); + } + /** + * @return Port override. + * When specified, replaces the port part of the task URI. + * For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + * Note that the port value must be a positive integer. + * Setting the port to 0 (Zero) clears the URI port. + * + */ + public Optional port() { + return Optional.ofNullable(this.port); + } + /** + * @return URI query. + * When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + * Structure is documented below. + * + */ + public Optional queryOverride() { + return Optional.ofNullable(this.queryOverride); + } + /** + * @return Scheme override. + * When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + * Possible values are: `HTTP`, `HTTPS`. + * + */ + public Optional scheme() { + return Optional.ofNullable(this.scheme); + } + /** + * @return URI Override Enforce Mode + * When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + * Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. 
+ * + */ + public Optional uriOverrideEnforceMode() { + return Optional.ofNullable(this.uriOverrideEnforceMode); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(QueueHttpTargetUriOverride defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String host; + private @Nullable QueueHttpTargetUriOverridePathOverride pathOverride; + private @Nullable String port; + private @Nullable QueueHttpTargetUriOverrideQueryOverride queryOverride; + private @Nullable String scheme; + private @Nullable String uriOverrideEnforceMode; + public Builder() {} + public Builder(QueueHttpTargetUriOverride defaults) { + Objects.requireNonNull(defaults); + this.host = defaults.host; + this.pathOverride = defaults.pathOverride; + this.port = defaults.port; + this.queryOverride = defaults.queryOverride; + this.scheme = defaults.scheme; + this.uriOverrideEnforceMode = defaults.uriOverrideEnforceMode; + } + + @CustomType.Setter + public Builder host(@Nullable String host) { + + this.host = host; + return this; + } + @CustomType.Setter + public Builder pathOverride(@Nullable QueueHttpTargetUriOverridePathOverride pathOverride) { + + this.pathOverride = pathOverride; + return this; + } + @CustomType.Setter + public Builder port(@Nullable String port) { + + this.port = port; + return this; + } + @CustomType.Setter + public Builder queryOverride(@Nullable QueueHttpTargetUriOverrideQueryOverride queryOverride) { + + this.queryOverride = queryOverride; + return this; + } + @CustomType.Setter + public Builder scheme(@Nullable String scheme) { + + this.scheme = scheme; + return this; + } + @CustomType.Setter + public Builder uriOverrideEnforceMode(@Nullable String uriOverrideEnforceMode) { + + this.uriOverrideEnforceMode = uriOverrideEnforceMode; + return this; + } + public QueueHttpTargetUriOverride build() { + final var _resultValue = new QueueHttpTargetUriOverride(); + _resultValue.host = host; + _resultValue.pathOverride = pathOverride; + _resultValue.port = port; + _resultValue.queryOverride = queryOverride; + _resultValue.scheme = scheme; + _resultValue.uriOverrideEnforceMode = uriOverrideEnforceMode; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverridePathOverride.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverridePathOverride.java new file mode 100644 index 0000000000..c46f6e22cb --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverridePathOverride.java @@ -0,0 +1,57 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class QueueHttpTargetUriOverridePathOverride { + /** + * @return The URI path (e.g., /users/1234). Default is an empty string. + * + */ + private @Nullable String path; + + private QueueHttpTargetUriOverridePathOverride() {} + /** + * @return The URI path (e.g., /users/1234). Default is an empty string. 
+ * + */ + public Optional path() { + return Optional.ofNullable(this.path); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(QueueHttpTargetUriOverridePathOverride defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String path; + public Builder() {} + public Builder(QueueHttpTargetUriOverridePathOverride defaults) { + Objects.requireNonNull(defaults); + this.path = defaults.path; + } + + @CustomType.Setter + public Builder path(@Nullable String path) { + + this.path = path; + return this; + } + public QueueHttpTargetUriOverridePathOverride build() { + final var _resultValue = new QueueHttpTargetUriOverridePathOverride(); + _resultValue.path = path; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverrideQueryOverride.java b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverrideQueryOverride.java new file mode 100644 index 0000000000..a33744cedd --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/cloudtasks/outputs/QueueHttpTargetUriOverrideQueryOverride.java @@ -0,0 +1,57 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.cloudtasks.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class QueueHttpTargetUriOverrideQueryOverride { + /** + * @return The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + * + */ + private @Nullable String queryParams; + + private QueueHttpTargetUriOverrideQueryOverride() {} + /** + * @return The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + * + */ + public Optional queryParams() { + return Optional.ofNullable(this.queryParams); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(QueueHttpTargetUriOverrideQueryOverride defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String queryParams; + public Builder() {} + public Builder(QueueHttpTargetUriOverrideQueryOverride defaults) { + Objects.requireNonNull(defaults); + this.queryParams = defaults.queryParams; + } + + @CustomType.Setter + public Builder queryParams(@Nullable String queryParams) { + + this.queryParams = queryParams; + return this; + } + public QueueHttpTargetUriOverrideQueryOverride build() { + final var _resultValue = new QueueHttpTargetUriOverrideQueryOverride(); + _resultValue.queryParams = queryParams; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/HealthCheck.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/HealthCheck.java index 7064f4e7a3..9b202ea176 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/HealthCheck.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/HealthCheck.java @@ -612,6 +612,138 @@ * } * * <!--End PulumiCodeChooser --> + * ### Compute Health Check Http Source Regions + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.compute.HealthCheck;
+ * import com.pulumi.gcp.compute.HealthCheckArgs;
+ * import com.pulumi.gcp.compute.inputs.HealthCheckHttpHealthCheckArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var http_health_check_with_source_regions = new HealthCheck("http-health-check-with-source-regions", HealthCheckArgs.builder()
+ *             .name("http-health-check")
+ *             .checkIntervalSec(30)
+ *             .httpHealthCheck(HealthCheckHttpHealthCheckArgs.builder()
+ *                 .port(80)
+ *                 .portSpecification("USE_FIXED_PORT")
+ *                 .build())
+ *             .sourceRegions(            
+ *                 "us-west1",
+ *                 "us-central1",
+ *                 "us-east5")
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * </pre>
+ * <!--End PulumiCodeChooser -->
+ * ### Compute Health Check Https Source Regions
+ * 
+ * <!--Start PulumiCodeChooser -->
+ * <pre>
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.compute.HealthCheck;
+ * import com.pulumi.gcp.compute.HealthCheckArgs;
+ * import com.pulumi.gcp.compute.inputs.HealthCheckHttpsHealthCheckArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var https_health_check_with_source_regions = new HealthCheck("https-health-check-with-source-regions", HealthCheckArgs.builder()
+ *             .name("https-health-check")
+ *             .checkIntervalSec(30)
+ *             .httpsHealthCheck(HealthCheckHttpsHealthCheckArgs.builder()
+ *                 .port(80)
+ *                 .portSpecification("USE_FIXED_PORT")
+ *                 .build())
+ *             .sourceRegions(            
+ *                 "us-west1",
+ *                 "us-central1",
+ *                 "us-east5")
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * </pre>
+ * <!--End PulumiCodeChooser -->
+ * ### Compute Health Check Tcp Source Regions
+ * 
+ * <!--Start PulumiCodeChooser -->
+ * <pre>
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.compute.HealthCheck;
+ * import com.pulumi.gcp.compute.HealthCheckArgs;
+ * import com.pulumi.gcp.compute.inputs.HealthCheckTcpHealthCheckArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var tcp_health_check_with_source_regions = new HealthCheck("tcp-health-check-with-source-regions", HealthCheckArgs.builder()
+ *             .name("tcp-health-check")
+ *             .checkIntervalSec(30)
+ *             .tcpHealthCheck(HealthCheckTcpHealthCheckArgs.builder()
+ *                 .port(80)
+ *                 .portSpecification("USE_FIXED_PORT")
+ *                 .build())
+ *             .sourceRegions(            
+ *                 "us-west1",
+ *                 "us-central1",
+ *                 "us-east5")
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> * * ## Import * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/Instance.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/Instance.java index ce522388bf..317b3c4c12 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/Instance.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/Instance.java @@ -107,6 +107,80 @@ * * <!--End PulumiCodeChooser --> * + * ### Confidential Computing + * + * Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.serviceaccount.Account;
+ * import com.pulumi.gcp.serviceaccount.AccountArgs;
+ * import com.pulumi.gcp.compute.Instance;
+ * import com.pulumi.gcp.compute.InstanceArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceNetworkInterfaceArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceConfidentialInstanceConfigArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceBootDiskArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceBootDiskInitializeParamsArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceScratchDiskArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceServiceAccountArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var default_ = new Account("default", AccountArgs.builder()
+ *             .accountId("my-custom-sa")
+ *             .displayName("Custom SA for VM Instance")
+ *             .build());
+ * 
+ *         var confidentialInstance = new Instance("confidentialInstance", InstanceArgs.builder()
+ *             .networkInterfaces(InstanceNetworkInterfaceArgs.builder()
+ *                 .accessConfigs()
+ *                 .network("default")
+ *                 .build())
+ *             .name("my-confidential-instance")
+ *             .zone("us-central1-a")
+ *             .machineType("n2d-standard-2")
+ *             .minCpuPlatform("AMD Milan")
+ *             .confidentialInstanceConfig(InstanceConfidentialInstanceConfigArgs.builder()
+ *                 .enableConfidentialCompute(true)
+ *                 .confidentialInstanceType("SEV")
+ *                 .build())
+ *             .bootDisk(InstanceBootDiskArgs.builder()
+ *                 .initializeParams(InstanceBootDiskInitializeParamsArgs.builder()
+ *                     .image("ubuntu-os-cloud/ubuntu-2004-lts")
+ *                     .labels(Map.of("my_label", "value"))
+ *                     .build())
+ *                 .build())
+ *             .scratchDisks(InstanceScratchDiskArgs.builder()
+ *                 .interface_("NVME")
+ *                 .build())
+ *             .serviceAccount(InstanceServiceAccountArgs.builder()
+ *                 .email(default_.email())
+ *                 .scopes("cloud-platform")
+ *                 .build())
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * * ## Import * * Instances can be imported using any of these accepted formats: @@ -241,14 +315,14 @@ public Output cpuPlatform() { return this.cpuPlatform; } /** - * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). * */ @Export(name="currentStatus", refs={String.class}, tree="[0]") private Output currentStatus; /** - * @return The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * @return The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). * */ public Output currentStatus() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/InstanceTemplate.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/InstanceTemplate.java index c1e028a5ee..9ff18c5a9b 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/InstanceTemplate.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/InstanceTemplate.java @@ -254,6 +254,72 @@ * * <!--End PulumiCodeChooser --> * + * ### Confidential Computing + * + * Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.serviceaccount.Account;
+ * import com.pulumi.gcp.serviceaccount.AccountArgs;
+ * import com.pulumi.gcp.compute.InstanceTemplate;
+ * import com.pulumi.gcp.compute.InstanceTemplateArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceTemplateNetworkInterfaceArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceTemplateConfidentialInstanceConfigArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceTemplateDiskArgs;
+ * import com.pulumi.gcp.compute.inputs.InstanceTemplateServiceAccountArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var default_ = new Account("default", AccountArgs.builder()
+ *             .accountId("my-custom-sa")
+ *             .displayName("Custom SA for VM Instance")
+ *             .build());
+ * 
+ *         var confidentialInstanceTemplate = new InstanceTemplate("confidentialInstanceTemplate", InstanceTemplateArgs.builder()
+ *             .networkInterfaces(InstanceTemplateNetworkInterfaceArgs.builder()
+ *                 .accessConfigs()
+ *                 .network("default")
+ *                 .build())
+ *             .name("my-confidential-instance-template")
+ *             .region("us-central1")
+ *             .machineType("n2d-standard-2")
+ *             .minCpuPlatform("AMD Milan")
+ *             .confidentialInstanceConfig(InstanceTemplateConfidentialInstanceConfigArgs.builder()
+ *                 .enableConfidentialCompute(true)
+ *                 .confidentialInstanceType("SEV")
+ *                 .build())
+ *             .disks(InstanceTemplateDiskArgs.builder()
+ *                 .sourceImage("ubuntu-os-cloud/ubuntu-2004-lts")
+ *                 .build())
+ *             .serviceAccount(InstanceTemplateServiceAccountArgs.builder()
+ *                 .email(default_.email())
+ *                 .scopes("cloud-platform")
+ *                 .build())
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * * ## Deploying the Latest Image * * A common way to use instance templates and managed instance groups is to deploy the diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/Interconnect.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/Interconnect.java index fb931e043d..788146f745 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/Interconnect.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/Interconnect.java @@ -569,22 +569,24 @@ public Output> remoteLocation() { return Codegen.optional(this.remoteLocation); } /** - * interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * */ @Export(name="requestedFeatures", refs={List.class,String.class}, tree="[0,1]") private Output> requestedFeatures; /** - * @return interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * @return interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * */ public Output>> requestedFeatures() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/InterconnectArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/InterconnectArgs.java index c2f96615da..524d831204 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/InterconnectArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/InterconnectArgs.java @@ -277,22 +277,24 @@ public Optional> remoteLocation() { } /** - * interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * */ @Import(name="requestedFeatures") private @Nullable Output> requestedFeatures; /** - * @return interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * @return interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. 
If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * */ public Optional>> requestedFeatures() { @@ -686,11 +688,12 @@ public Builder remoteLocation(String remoteLocation) { } /** - * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * * @return builder * @@ -701,11 +704,12 @@ public Builder requestedFeatures(@Nullable Output> requestedFeature } /** - * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * * @return builder * @@ -715,11 +719,12 @@ public Builder requestedFeatures(List requestedFeatures) { } /** - * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. 
* * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/NodeTemplate.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/NodeTemplate.java index 19ea79114f..10fb5e69b3 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/NodeTemplate.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/NodeTemplate.java @@ -10,9 +10,11 @@ import com.pulumi.gcp.Utilities; import com.pulumi.gcp.compute.NodeTemplateArgs; import com.pulumi.gcp.compute.inputs.NodeTemplateState; +import com.pulumi.gcp.compute.outputs.NodeTemplateAccelerator; import com.pulumi.gcp.compute.outputs.NodeTemplateNodeTypeFlexibility; import com.pulumi.gcp.compute.outputs.NodeTemplateServerBinding; import java.lang.String; +import java.util.List; import java.util.Map; import java.util.Optional; import javax.annotation.Nullable; @@ -113,6 +115,53 @@ * } * * <!--End PulumiCodeChooser --> + * ### Node Template Accelerators + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.compute.ComputeFunctions;
+ * import com.pulumi.gcp.compute.inputs.GetNodeTypesArgs;
+ * import com.pulumi.gcp.compute.NodeTemplate;
+ * import com.pulumi.gcp.compute.NodeTemplateArgs;
+ * import com.pulumi.gcp.compute.inputs.NodeTemplateAcceleratorArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         final var central1a = ComputeFunctions.getNodeTypes(GetNodeTypesArgs.builder()
+ *             .zone("us-central1-a")
+ *             .build());
+ * 
+ *         var template = new NodeTemplate("template", NodeTemplateArgs.builder()
+ *             .name("soletenant-with-accelerators")
+ *             .region("us-central1")
+ *             .nodeType("n1-node-96-624")
+ *             .accelerators(NodeTemplateAcceleratorArgs.builder()
+ *                 .acceleratorType("nvidia-tesla-t4")
+ *                 .acceleratorCount(4)
+ *                 .build())
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> * * ## Import * @@ -147,6 +196,24 @@ */ @ResourceType(type="gcp:compute/nodeTemplate:NodeTemplate") public class NodeTemplate extends com.pulumi.resources.CustomResource { + /** + * List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + */ + @Export(name="accelerators", refs={List.class,NodeTemplateAccelerator.class}, tree="[0,1]") + private Output> accelerators; + + /** + * @return List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + */ + public Output>> accelerators() { + return Codegen.optional(this.accelerators); + } /** * CPU overcommit. * Default value is `NONE`. diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/NodeTemplateArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/NodeTemplateArgs.java index 892e6125ea..7697f59921 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/NodeTemplateArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/NodeTemplateArgs.java @@ -5,9 +5,11 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.compute.inputs.NodeTemplateAcceleratorArgs; import com.pulumi.gcp.compute.inputs.NodeTemplateNodeTypeFlexibilityArgs; import com.pulumi.gcp.compute.inputs.NodeTemplateServerBindingArgs; import java.lang.String; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -18,6 +20,25 @@ public final class NodeTemplateArgs extends com.pulumi.resources.ResourceArgs { public static final NodeTemplateArgs Empty = new NodeTemplateArgs(); + /** + * List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + */ + @Import(name="accelerators") + private @Nullable Output> accelerators; + + /** + * @return List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + */ + public Optional>> accelerators() { + return Optional.ofNullable(this.accelerators); + } + /** * CPU overcommit. * Default value is `NONE`. @@ -180,6 +201,7 @@ public Optional> serverBinding() { private NodeTemplateArgs() {} private NodeTemplateArgs(NodeTemplateArgs $) { + this.accelerators = $.accelerators; this.cpuOvercommitType = $.cpuOvercommitType; this.description = $.description; this.name = $.name; @@ -209,6 +231,43 @@ public Builder(NodeTemplateArgs defaults) { $ = new NodeTemplateArgs(Objects.requireNonNull(defaults)); } + /** + * @param accelerators List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + * @return builder + * + */ + public Builder accelerators(@Nullable Output> accelerators) { + $.accelerators = accelerators; + return this; + } + + /** + * @param accelerators List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + * @return builder + * + */ + public Builder accelerators(List accelerators) { + return accelerators(Output.of(accelerators)); + } + + /** + * @param accelerators List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + * @return builder + * + */ + public Builder accelerators(NodeTemplateAcceleratorArgs... accelerators) { + return accelerators(List.of(accelerators)); + } + /** * @param cpuOvercommitType CPU overcommit. * Default value is `NONE`. 
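Note on the Interconnect hunks above: they rename the documented `requestedFeatures` value from `MACSEC` to `IF_MACSEC`. A minimal sketch of passing the renamed value through the Java SDK follows; the interconnect name, customer name, location URL, link type, and link count below are placeholder assumptions, not values taken from this patch.

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.compute.Interconnect;
import com.pulumi.gcp.compute.InterconnectArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // All concrete values here are placeholders; the point is requestedFeatures("IF_MACSEC").
        var example = new Interconnect("example", InterconnectArgs.builder()
            .name("example-interconnect")
            .customerName("example-customer")
            .interconnectType("DEDICATED")
            .linkType("LINK_TYPE_ETHERNET_10G_LR")
            .requestedLinkCount(1)
            .location("https://www.googleapis.com/compute/v1/projects/example/global/interconnectLocations/iad-zone1-1")
            // Documented as MACSEC before this upgrade; MACSEC still compiles, but per the
            // note above it does not work with the API and is slated for removal.
            .requestedFeatures("IF_MACSEC")
            .build());
    }
}

Since the field is a plain list of strings, existing programs that pass MACSEC keep compiling; per the updated documentation they should move to IF_MACSEC before the next major version.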
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/TargetHttpsProxy.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/TargetHttpsProxy.java index 8ec19d0b88..ed44f9afea 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/TargetHttpsProxy.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/TargetHttpsProxy.java @@ -665,6 +665,10 @@ public Output selfLink() { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. * */ @Export(name="serverTlsPolicy", refs={String.class}, tree="[0]") @@ -680,6 +684,10 @@ public Output selfLink() { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. * */ public Output> serverTlsPolicy() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/TargetHttpsProxyArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/TargetHttpsProxyArgs.java index 4e495f93fe..b1db8eb815 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/TargetHttpsProxyArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/TargetHttpsProxyArgs.java @@ -199,6 +199,10 @@ public Optional> quicOverride() { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. * */ @Import(name="serverTlsPolicy") @@ -214,6 +218,10 @@ public Optional> quicOverride() { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. * */ public Optional> serverTlsPolicy() { @@ -580,6 +588,10 @@ public Builder quicOverride(String quicOverride) { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. 
* * @return builder * @@ -599,6 +611,10 @@ public Builder serverTlsPolicy(@Nullable Output serverTlsPolicy) { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/FirewallAllowArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/FirewallAllowArgs.java index 2eb84f3971..1cca0d6d50 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/FirewallAllowArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/FirewallAllowArgs.java @@ -22,7 +22,7 @@ public final class FirewallAllowArgs extends com.pulumi.resources.ResourceArgs { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * */ @@ -34,7 +34,7 @@ public final class FirewallAllowArgs extends com.pulumi.resources.ResourceArgs { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * */ @@ -93,7 +93,7 @@ public Builder(FirewallAllowArgs defaults) { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * * @return builder @@ -109,7 +109,7 @@ public Builder ports(@Nullable Output> ports) { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * * @return builder @@ -124,7 +124,7 @@ public Builder ports(List ports) { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * * @return builder diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/FirewallDenyArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/FirewallDenyArgs.java index dcd3ca9c89..02c542108e 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/FirewallDenyArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/FirewallDenyArgs.java @@ -22,7 +22,7 @@ public final class FirewallDenyArgs extends com.pulumi.resources.ResourceArgs { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. 
- * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * */ @@ -34,7 +34,7 @@ public final class FirewallDenyArgs extends com.pulumi.resources.ResourceArgs { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * */ @@ -93,7 +93,7 @@ public Builder(FirewallDenyArgs defaults) { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * * @return builder @@ -109,7 +109,7 @@ public Builder ports(@Nullable Output> ports) { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * * @return builder @@ -124,7 +124,7 @@ public Builder ports(List ports) { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * * @return builder diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceBootDiskArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceBootDiskArgs.java index 8cca580ea3..7d15d5945a 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceBootDiskArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceBootDiskArgs.java @@ -112,6 +112,21 @@ public Optional> initializeParams() return Optional.ofNullable(this.initializeParams); } + /** + * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + @Import(name="interface") + private @Nullable Output interface_; + + /** + * @return The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + public Optional> interface_() { + return Optional.ofNullable(this.interface_); + } + /** * The self_link of the encryption key that is * stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link` @@ -175,6 +190,7 @@ private InstanceBootDiskArgs(InstanceBootDiskArgs $) { this.diskEncryptionKeyRaw = $.diskEncryptionKeyRaw; this.diskEncryptionKeySha256 = $.diskEncryptionKeySha256; this.initializeParams = $.initializeParams; + this.interface_ = $.interface_; this.kmsKeySelfLink = $.kmsKeySelfLink; this.mode = $.mode; this.source = $.source; @@ -323,6 +339,27 @@ public Builder initializeParams(InstanceBootDiskInitializeParamsArgs initializeP return initializeParams(Output.of(initializeParams)); } + /** + * @param interface_ The disk interface used for attaching this disk. One of SCSI or NVME. 
(This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + * @return builder + * + */ + public Builder interface_(@Nullable Output interface_) { + $.interface_ = interface_; + return this; + } + + /** + * @param interface_ The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + * @return builder + * + */ + public Builder interface_(String interface_) { + return interface_(Output.of(interface_)); + } + /** * @param kmsKeySelfLink The self_link of the encryption key that is * stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link` diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceFromMachineImageBootDiskArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceFromMachineImageBootDiskArgs.java index 32844c7d68..2f9a01d7ba 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceFromMachineImageBootDiskArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceFromMachineImageBootDiskArgs.java @@ -92,6 +92,21 @@ public Optional> in return Optional.ofNullable(this.initializeParams); } + /** + * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + @Import(name="interface") + private @Nullable Output interface_; + + /** + * @return The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + public Optional> interface_() { + return Optional.ofNullable(this.interface_); + } + /** * The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. * @@ -145,6 +160,7 @@ private InstanceFromMachineImageBootDiskArgs(InstanceFromMachineImageBootDiskArg this.diskEncryptionKeyRaw = $.diskEncryptionKeyRaw; this.diskEncryptionKeySha256 = $.diskEncryptionKeySha256; this.initializeParams = $.initializeParams; + this.interface_ = $.interface_; this.kmsKeySelfLink = $.kmsKeySelfLink; this.mode = $.mode; this.source = $.source; @@ -273,6 +289,27 @@ public Builder initializeParams(InstanceFromMachineImageBootDiskInitializeParams return initializeParams(Output.of(initializeParams)); } + /** + * @param interface_ The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + * @return builder + * + */ + public Builder interface_(@Nullable Output interface_) { + $.interface_ = interface_; + return this; + } + + /** + * @param interface_ The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + * @return builder + * + */ + public Builder interface_(String interface_) { + return interface_(Output.of(interface_)); + } + /** * @param kmsKeySelfLink The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. 
Only one of kms_key_self_link and disk_encryption_key_raw may be set. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceFromTemplateBootDiskArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceFromTemplateBootDiskArgs.java index 8a3f93645a..2f51170d56 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceFromTemplateBootDiskArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceFromTemplateBootDiskArgs.java @@ -92,6 +92,21 @@ public Optional> initia return Optional.ofNullable(this.initializeParams); } + /** + * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + @Import(name="interface") + private @Nullable Output interface_; + + /** + * @return The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + public Optional> interface_() { + return Optional.ofNullable(this.interface_); + } + /** * The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. * @@ -145,6 +160,7 @@ private InstanceFromTemplateBootDiskArgs(InstanceFromTemplateBootDiskArgs $) { this.diskEncryptionKeyRaw = $.diskEncryptionKeyRaw; this.diskEncryptionKeySha256 = $.diskEncryptionKeySha256; this.initializeParams = $.initializeParams; + this.interface_ = $.interface_; this.kmsKeySelfLink = $.kmsKeySelfLink; this.mode = $.mode; this.source = $.source; @@ -273,6 +289,27 @@ public Builder initializeParams(InstanceFromTemplateBootDiskInitializeParamsArgs return initializeParams(Output.of(initializeParams)); } + /** + * @param interface_ The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + * @return builder + * + */ + public Builder interface_(@Nullable Output interface_) { + $.interface_ = interface_; + return this; + } + + /** + * @param interface_ The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + * @return builder + * + */ + public Builder interface_(String interface_) { + return interface_(Output.of(interface_)); + } + /** * @param kmsKeySelfLink The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceNetworkInterfaceArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceNetworkInterfaceArgs.java index edd6bdd40f..6e73ade3ce 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceNetworkInterfaceArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceNetworkInterfaceArgs.java @@ -277,7 +277,7 @@ public Optional> subnetwork() { /** * The project in which the subnetwork belongs. 
- * If the `subnetwork` is a self_link, this field is ignored in favor of the project + * If the `subnetwork` is a self_link, this field is set to the project * defined in the subnetwork self_link. If the `subnetwork` is a name and this * field is not provided, the provider project is used. * @@ -287,7 +287,7 @@ public Optional> subnetwork() { /** * @return The project in which the subnetwork belongs. - * If the `subnetwork` is a self_link, this field is ignored in favor of the project + * If the `subnetwork` is a self_link, this field is set to the project * defined in the subnetwork self_link. If the `subnetwork` is a name and this * field is not provided, the provider project is used. * @@ -716,7 +716,7 @@ public Builder subnetwork(String subnetwork) { /** * @param subnetworkProject The project in which the subnetwork belongs. - * If the `subnetwork` is a self_link, this field is ignored in favor of the project + * If the `subnetwork` is a self_link, this field is set to the project * defined in the subnetwork self_link. If the `subnetwork` is a name and this * field is not provided, the provider project is used. * @@ -730,7 +730,7 @@ public Builder subnetworkProject(@Nullable Output subnetworkProject) { /** * @param subnetworkProject The project in which the subnetwork belongs. - * If the `subnetwork` is a self_link, this field is ignored in favor of the project + * If the `subnetwork` is a self_link, this field is set to the project * defined in the subnetwork self_link. If the `subnetwork` is a name and this * field is not provided, the provider project is used. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceState.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceState.java index 356be443e4..5aa12ce2a1 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InstanceState.java @@ -145,14 +145,14 @@ public Optional> cpuPlatform() { } /** - * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). * */ @Import(name="currentStatus") private @Nullable Output currentStatus; /** - * @return The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * @return The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). 
* */ public Optional> currentStatus() { @@ -978,7 +978,7 @@ public Builder cpuPlatform(String cpuPlatform) { } /** - * @param currentStatus The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * @param currentStatus The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). * * @return builder * @@ -989,7 +989,7 @@ public Builder currentStatus(@Nullable Output currentStatus) { } /** - * @param currentStatus The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * @param currentStatus The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InterconnectState.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InterconnectState.java index 3c558323a5..5cb1190c13 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InterconnectState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/InterconnectState.java @@ -513,22 +513,24 @@ public Optional> remoteLocation() { } /** - * interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * */ @Import(name="requestedFeatures") private @Nullable Output> requestedFeatures; /** - * @return interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * @return interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. 
* */ public Optional>> requestedFeatures() { @@ -1339,11 +1341,12 @@ public Builder remoteLocation(String remoteLocation) { } /** - * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * * @return builder * @@ -1354,11 +1357,12 @@ public Builder requestedFeatures(@Nullable Output> requestedFeature } /** - * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * * @return builder * @@ -1368,11 +1372,12 @@ public Builder requestedFeatures(List requestedFeatures) { } /** - * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * @param requestedFeatures interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/NodeTemplateAcceleratorArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/NodeTemplateAcceleratorArgs.java new file mode 100644 index 0000000000..1c5a4f6f48 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/NodeTemplateAcceleratorArgs.java @@ -0,0 +1,129 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.compute.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class NodeTemplateAcceleratorArgs extends com.pulumi.resources.ResourceArgs { + + public static final NodeTemplateAcceleratorArgs Empty = new NodeTemplateAcceleratorArgs(); + + /** + * The number of the guest accelerator cards exposed to this + * node template. + * + */ + @Import(name="acceleratorCount") + private @Nullable Output acceleratorCount; + + /** + * @return The number of the guest accelerator cards exposed to this + * node template. + * + */ + public Optional> acceleratorCount() { + return Optional.ofNullable(this.acceleratorCount); + } + + /** + * Full or partial URL of the accelerator type resource to expose + * to this node template. + * + */ + @Import(name="acceleratorType") + private @Nullable Output acceleratorType; + + /** + * @return Full or partial URL of the accelerator type resource to expose + * to this node template. + * + */ + public Optional> acceleratorType() { + return Optional.ofNullable(this.acceleratorType); + } + + private NodeTemplateAcceleratorArgs() {} + + private NodeTemplateAcceleratorArgs(NodeTemplateAcceleratorArgs $) { + this.acceleratorCount = $.acceleratorCount; + this.acceleratorType = $.acceleratorType; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(NodeTemplateAcceleratorArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private NodeTemplateAcceleratorArgs $; + + public Builder() { + $ = new NodeTemplateAcceleratorArgs(); + } + + public Builder(NodeTemplateAcceleratorArgs defaults) { + $ = new NodeTemplateAcceleratorArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param acceleratorCount The number of the guest accelerator cards exposed to this + * node template. + * + * @return builder + * + */ + public Builder acceleratorCount(@Nullable Output acceleratorCount) { + $.acceleratorCount = acceleratorCount; + return this; + } + + /** + * @param acceleratorCount The number of the guest accelerator cards exposed to this + * node template. + * + * @return builder + * + */ + public Builder acceleratorCount(Integer acceleratorCount) { + return acceleratorCount(Output.of(acceleratorCount)); + } + + /** + * @param acceleratorType Full or partial URL of the accelerator type resource to expose + * to this node template. + * + * @return builder + * + */ + public Builder acceleratorType(@Nullable Output acceleratorType) { + $.acceleratorType = acceleratorType; + return this; + } + + /** + * @param acceleratorType Full or partial URL of the accelerator type resource to expose + * to this node template. 
+ * + * @return builder + * + */ + public Builder acceleratorType(String acceleratorType) { + return acceleratorType(Output.of(acceleratorType)); + } + + public NodeTemplateAcceleratorArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/NodeTemplateState.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/NodeTemplateState.java index 1aadd58f21..5991bea65b 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/NodeTemplateState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/NodeTemplateState.java @@ -5,9 +5,11 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.compute.inputs.NodeTemplateAcceleratorArgs; import com.pulumi.gcp.compute.inputs.NodeTemplateNodeTypeFlexibilityArgs; import com.pulumi.gcp.compute.inputs.NodeTemplateServerBindingArgs; import java.lang.String; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -18,6 +20,25 @@ public final class NodeTemplateState extends com.pulumi.resources.ResourceArgs { public static final NodeTemplateState Empty = new NodeTemplateState(); + /** + * List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + */ + @Import(name="accelerators") + private @Nullable Output> accelerators; + + /** + * @return List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + */ + public Optional>> accelerators() { + return Optional.ofNullable(this.accelerators); + } + /** * CPU overcommit. * Default value is `NONE`. @@ -210,6 +231,7 @@ public Optional> serverBinding() { private NodeTemplateState() {} private NodeTemplateState(NodeTemplateState $) { + this.accelerators = $.accelerators; this.cpuOvercommitType = $.cpuOvercommitType; this.creationTimestamp = $.creationTimestamp; this.description = $.description; @@ -241,6 +263,43 @@ public Builder(NodeTemplateState defaults) { $ = new NodeTemplateState(Objects.requireNonNull(defaults)); } + /** + * @param accelerators List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + * @return builder + * + */ + public Builder accelerators(@Nullable Output> accelerators) { + $.accelerators = accelerators; + return this; + } + + /** + * @param accelerators List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + * @return builder + * + */ + public Builder accelerators(List accelerators) { + return accelerators(Output.of(accelerators)); + } + + /** + * @param accelerators List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + * + * @return builder + * + */ + public Builder accelerators(NodeTemplateAcceleratorArgs... accelerators) { + return accelerators(List.of(accelerators)); + } + /** * @param cpuOvercommitType CPU overcommit. * Default value is `NONE`. 
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/TargetHttpsProxyState.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/TargetHttpsProxyState.java index 5925e34c80..a4065cca08 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/TargetHttpsProxyState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/inputs/TargetHttpsProxyState.java @@ -243,6 +243,10 @@ public Optional> selfLink() { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. * */ @Import(name="serverTlsPolicy") @@ -258,6 +262,10 @@ public Optional> selfLink() { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. * */ public Optional> serverTlsPolicy() { @@ -690,6 +698,10 @@ public Builder selfLink(String selfLink) { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. * * @return builder * @@ -709,6 +721,10 @@ public Builder serverTlsPolicy(@Nullable Output serverTlsPolicy) { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/FirewallAllow.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/FirewallAllow.java index 66609cc38f..cf44a258e4 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/FirewallAllow.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/FirewallAllow.java @@ -17,7 +17,7 @@ public final class FirewallAllow { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * */ @@ -37,7 +37,7 @@ private FirewallAllow() {} * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. 
- * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * */ diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/FirewallDeny.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/FirewallDeny.java index ce0a9b4f04..8992603ba0 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/FirewallDeny.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/FirewallDeny.java @@ -17,7 +17,7 @@ public final class FirewallDeny { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * */ @@ -37,7 +37,7 @@ private FirewallDeny() {} * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. * */ diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/GetInstanceBootDisk.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/GetInstanceBootDisk.java index 6c38e780e0..19bd198384 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/GetInstanceBootDisk.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/GetInstanceBootDisk.java @@ -42,6 +42,11 @@ public final class GetInstanceBootDisk { * */ private List initializeParams; + /** + * @return The disk interface used for attaching this disk. One of `SCSI` or `NVME`. + * + */ + private String interface_; /** * @return The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. * @@ -98,6 +103,13 @@ public String diskEncryptionKeySha256() { public List initializeParams() { return this.initializeParams; } + /** + * @return The disk interface used for attaching this disk. One of `SCSI` or `NVME`. + * + */ + public String interface_() { + return this.interface_; + } /** * @return The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. * @@ -134,6 +146,7 @@ public static final class Builder { private String diskEncryptionKeyRaw; private String diskEncryptionKeySha256; private List initializeParams; + private String interface_; private String kmsKeySelfLink; private String mode; private String source; @@ -145,6 +158,7 @@ public Builder(GetInstanceBootDisk defaults) { this.diskEncryptionKeyRaw = defaults.diskEncryptionKeyRaw; this.diskEncryptionKeySha256 = defaults.diskEncryptionKeySha256; this.initializeParams = defaults.initializeParams; + this.interface_ = defaults.interface_; this.kmsKeySelfLink = defaults.kmsKeySelfLink; this.mode = defaults.mode; this.source = defaults.source; @@ -193,6 +207,14 @@ public Builder initializeParams(List initial public Builder initializeParams(GetInstanceBootDiskInitializeParam... 
initializeParams) { return initializeParams(List.of(initializeParams)); } + @CustomType.Setter("interface") + public Builder interface_(String interface_) { + if (interface_ == null) { + throw new MissingRequiredPropertyException("GetInstanceBootDisk", "interface_"); + } + this.interface_ = interface_; + return this; + } @CustomType.Setter public Builder kmsKeySelfLink(String kmsKeySelfLink) { if (kmsKeySelfLink == null) { @@ -224,6 +246,7 @@ public GetInstanceBootDisk build() { _resultValue.diskEncryptionKeyRaw = diskEncryptionKeyRaw; _resultValue.diskEncryptionKeySha256 = diskEncryptionKeySha256; _resultValue.initializeParams = initializeParams; + _resultValue.interface_ = interface_; _resultValue.kmsKeySelfLink = kmsKeySelfLink; _resultValue.mode = mode; _resultValue.source = source; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/GetInstanceResult.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/GetInstanceResult.java index ffa8dd6cca..56bff3c11c 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/GetInstanceResult.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/GetInstanceResult.java @@ -52,7 +52,7 @@ public final class GetInstanceResult { */ private String cpuPlatform; /** - * @return The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * @return The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). * */ private String currentStatus; @@ -213,7 +213,7 @@ public String cpuPlatform() { return this.cpuPlatform; } /** - * @return The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * @return The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). * */ public String currentStatus() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceBootDisk.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceBootDisk.java index 43c366f258..727d90488a 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceBootDisk.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceBootDisk.java @@ -48,6 +48,11 @@ public final class InstanceBootDisk { * */ private @Nullable InstanceBootDiskInitializeParams initializeParams; + /** + * @return The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) 
+ * + */ + private @Nullable String interface_; /** * @return The self_link of the encryption key that is * stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link` @@ -115,6 +120,13 @@ public Optional diskEncryptionKeySha256() { public Optional initializeParams() { return Optional.ofNullable(this.initializeParams); } + /** + * @return The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + public Optional interface_() { + return Optional.ofNullable(this.interface_); + } /** * @return The self_link of the encryption key that is * stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link` @@ -156,6 +168,7 @@ public static final class Builder { private @Nullable String diskEncryptionKeyRaw; private @Nullable String diskEncryptionKeySha256; private @Nullable InstanceBootDiskInitializeParams initializeParams; + private @Nullable String interface_; private @Nullable String kmsKeySelfLink; private @Nullable String mode; private @Nullable String source; @@ -167,6 +180,7 @@ public Builder(InstanceBootDisk defaults) { this.diskEncryptionKeyRaw = defaults.diskEncryptionKeyRaw; this.diskEncryptionKeySha256 = defaults.diskEncryptionKeySha256; this.initializeParams = defaults.initializeParams; + this.interface_ = defaults.interface_; this.kmsKeySelfLink = defaults.kmsKeySelfLink; this.mode = defaults.mode; this.source = defaults.source; @@ -202,6 +216,12 @@ public Builder initializeParams(@Nullable InstanceBootDiskInitializeParams initi this.initializeParams = initializeParams; return this; } + @CustomType.Setter("interface") + public Builder interface_(@Nullable String interface_) { + + this.interface_ = interface_; + return this; + } @CustomType.Setter public Builder kmsKeySelfLink(@Nullable String kmsKeySelfLink) { @@ -227,6 +247,7 @@ public InstanceBootDisk build() { _resultValue.diskEncryptionKeyRaw = diskEncryptionKeyRaw; _resultValue.diskEncryptionKeySha256 = diskEncryptionKeySha256; _resultValue.initializeParams = initializeParams; + _resultValue.interface_ = interface_; _resultValue.kmsKeySelfLink = kmsKeySelfLink; _resultValue.mode = mode; _resultValue.source = source; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceFromMachineImageBootDisk.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceFromMachineImageBootDisk.java index ced7ad822d..6d472c245a 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceFromMachineImageBootDisk.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceFromMachineImageBootDisk.java @@ -38,6 +38,11 @@ public final class InstanceFromMachineImageBootDisk { * */ private @Nullable InstanceFromMachineImageBootDiskInitializeParams initializeParams; + /** + * @return The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + private @Nullable String interface_; /** * @return The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. 
* @@ -90,6 +95,13 @@ public Optional diskEncryptionKeySha256() { public Optional initializeParams() { return Optional.ofNullable(this.initializeParams); } + /** + * @return The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + public Optional interface_() { + return Optional.ofNullable(this.interface_); + } /** * @return The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. * @@ -126,6 +138,7 @@ public static final class Builder { private @Nullable String diskEncryptionKeyRaw; private @Nullable String diskEncryptionKeySha256; private @Nullable InstanceFromMachineImageBootDiskInitializeParams initializeParams; + private @Nullable String interface_; private @Nullable String kmsKeySelfLink; private @Nullable String mode; private @Nullable String source; @@ -137,6 +150,7 @@ public Builder(InstanceFromMachineImageBootDisk defaults) { this.diskEncryptionKeyRaw = defaults.diskEncryptionKeyRaw; this.diskEncryptionKeySha256 = defaults.diskEncryptionKeySha256; this.initializeParams = defaults.initializeParams; + this.interface_ = defaults.interface_; this.kmsKeySelfLink = defaults.kmsKeySelfLink; this.mode = defaults.mode; this.source = defaults.source; @@ -172,6 +186,12 @@ public Builder initializeParams(@Nullable InstanceFromMachineImageBootDiskInitia this.initializeParams = initializeParams; return this; } + @CustomType.Setter("interface") + public Builder interface_(@Nullable String interface_) { + + this.interface_ = interface_; + return this; + } @CustomType.Setter public Builder kmsKeySelfLink(@Nullable String kmsKeySelfLink) { @@ -197,6 +217,7 @@ public InstanceFromMachineImageBootDisk build() { _resultValue.diskEncryptionKeyRaw = diskEncryptionKeyRaw; _resultValue.diskEncryptionKeySha256 = diskEncryptionKeySha256; _resultValue.initializeParams = initializeParams; + _resultValue.interface_ = interface_; _resultValue.kmsKeySelfLink = kmsKeySelfLink; _resultValue.mode = mode; _resultValue.source = source; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceFromTemplateBootDisk.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceFromTemplateBootDisk.java index ae42a1b441..a5feb7744d 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceFromTemplateBootDisk.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceFromTemplateBootDisk.java @@ -38,6 +38,11 @@ public final class InstanceFromTemplateBootDisk { * */ private @Nullable InstanceFromTemplateBootDiskInitializeParams initializeParams; + /** + * @return The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + private @Nullable String interface_; /** * @return The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. * @@ -90,6 +95,13 @@ public Optional diskEncryptionKeySha256() { public Optional initializeParams() { return Optional.ofNullable(this.initializeParams); } + /** + * @return The disk interface used for attaching this disk. One of SCSI or NVME. 
(This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + * + */ + public Optional interface_() { + return Optional.ofNullable(this.interface_); + } /** * @return The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. * @@ -126,6 +138,7 @@ public static final class Builder { private @Nullable String diskEncryptionKeyRaw; private @Nullable String diskEncryptionKeySha256; private @Nullable InstanceFromTemplateBootDiskInitializeParams initializeParams; + private @Nullable String interface_; private @Nullable String kmsKeySelfLink; private @Nullable String mode; private @Nullable String source; @@ -137,6 +150,7 @@ public Builder(InstanceFromTemplateBootDisk defaults) { this.diskEncryptionKeyRaw = defaults.diskEncryptionKeyRaw; this.diskEncryptionKeySha256 = defaults.diskEncryptionKeySha256; this.initializeParams = defaults.initializeParams; + this.interface_ = defaults.interface_; this.kmsKeySelfLink = defaults.kmsKeySelfLink; this.mode = defaults.mode; this.source = defaults.source; @@ -172,6 +186,12 @@ public Builder initializeParams(@Nullable InstanceFromTemplateBootDiskInitialize this.initializeParams = initializeParams; return this; } + @CustomType.Setter("interface") + public Builder interface_(@Nullable String interface_) { + + this.interface_ = interface_; + return this; + } @CustomType.Setter public Builder kmsKeySelfLink(@Nullable String kmsKeySelfLink) { @@ -197,6 +217,7 @@ public InstanceFromTemplateBootDisk build() { _resultValue.diskEncryptionKeyRaw = diskEncryptionKeyRaw; _resultValue.diskEncryptionKeySha256 = diskEncryptionKeySha256; _resultValue.initializeParams = initializeParams; + _resultValue.interface_ = interface_; _resultValue.kmsKeySelfLink = kmsKeySelfLink; _resultValue.mode = mode; _resultValue.source = source; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceNetworkInterface.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceNetworkInterface.java index bc509fcf35..bb60f364e1 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceNetworkInterface.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/InstanceNetworkInterface.java @@ -108,7 +108,7 @@ public final class InstanceNetworkInterface { private @Nullable String subnetwork; /** * @return The project in which the subnetwork belongs. - * If the `subnetwork` is a self_link, this field is ignored in favor of the project + * If the `subnetwork` is a self_link, this field is set to the project * defined in the subnetwork self_link. If the `subnetwork` is a name and this * field is not provided, the provider project is used. * @@ -238,7 +238,7 @@ public Optional subnetwork() { } /** * @return The project in which the subnetwork belongs. - * If the `subnetwork` is a self_link, this field is ignored in favor of the project + * If the `subnetwork` is a self_link, this field is set to the project * defined in the subnetwork self_link. If the `subnetwork` is a name and this * field is not provided, the provider project is used. 
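Note on the FirewallAllow/FirewallDeny hunks in this diff: they change the documented port examples from quoted strings to bare integers ([22], [80, 443]). In the Java SDK the `ports` input remains a list of strings, so callers keep passing quoted values; a minimal sketch under that assumption, with the firewall name, network, and source range as placeholders.

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.compute.Firewall;
import com.pulumi.gcp.compute.FirewallArgs;
import com.pulumi.gcp.compute.inputs.FirewallAllowArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var firewall = new Firewall("allow-web", FirewallArgs.builder()
            .name("allow-web")
            .network("default")
            .sourceRanges("10.0.0.0/8")
            .allows(FirewallAllowArgs.builder()
                .protocol("tcp")
                // Individual ports and ranges are strings in the Java SDK,
                // regardless of how the upstream docs render the example inputs.
                .ports("22", "80", "443", "12345-12349")
                .build())
            .build());
    }
}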
* diff --git a/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/NodeTemplateAccelerator.java b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/NodeTemplateAccelerator.java new file mode 100644 index 0000000000..148797dc63 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/compute/outputs/NodeTemplateAccelerator.java @@ -0,0 +1,83 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.compute.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class NodeTemplateAccelerator { + /** + * @return The number of the guest accelerator cards exposed to this + * node template. + * + */ + private @Nullable Integer acceleratorCount; + /** + * @return Full or partial URL of the accelerator type resource to expose + * to this node template. + * + */ + private @Nullable String acceleratorType; + + private NodeTemplateAccelerator() {} + /** + * @return The number of the guest accelerator cards exposed to this + * node template. + * + */ + public Optional acceleratorCount() { + return Optional.ofNullable(this.acceleratorCount); + } + /** + * @return Full or partial URL of the accelerator type resource to expose + * to this node template. + * + */ + public Optional acceleratorType() { + return Optional.ofNullable(this.acceleratorType); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(NodeTemplateAccelerator defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable Integer acceleratorCount; + private @Nullable String acceleratorType; + public Builder() {} + public Builder(NodeTemplateAccelerator defaults) { + Objects.requireNonNull(defaults); + this.acceleratorCount = defaults.acceleratorCount; + this.acceleratorType = defaults.acceleratorType; + } + + @CustomType.Setter + public Builder acceleratorCount(@Nullable Integer acceleratorCount) { + + this.acceleratorCount = acceleratorCount; + return this; + } + @CustomType.Setter + public Builder acceleratorType(@Nullable String acceleratorType) { + + this.acceleratorType = acceleratorType; + return this; + } + public NodeTemplateAccelerator build() { + final var _resultValue = new NodeTemplateAccelerator(); + _resultValue.acceleratorCount = acceleratorCount; + _resultValue.acceleratorType = acceleratorType; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/AttachedCluster.java b/sdk/java/src/main/java/com/pulumi/gcp/container/AttachedCluster.java index de0a10fb78..879de60807 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/AttachedCluster.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/AttachedCluster.java @@ -367,14 +367,14 @@ public Output createTime() { return this.createTime; } /** - * Policy to determine what flags to send on delete. + * Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS * */ @Export(name="deletionPolicy", refs={String.class}, tree="[0]") private Output deletionPolicy; /** - * @return Policy to determine what flags to send on delete. + * @return Policy to determine what flags to send on delete. 
Possible values: DELETE, DELETE_IGNORE_ERRORS * */ public Output> deletionPolicy() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/AttachedClusterArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/container/AttachedClusterArgs.java index a728aad026..3f182be6e4 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/AttachedClusterArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/AttachedClusterArgs.java @@ -80,14 +80,14 @@ public Optional> binaryAuthorizat } /** - * Policy to determine what flags to send on delete. + * Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS * */ @Import(name="deletionPolicy") private @Nullable Output deletionPolicy; /** - * @return Policy to determine what flags to send on delete. + * @return Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS * */ public Optional> deletionPolicy() { @@ -387,7 +387,7 @@ public Builder binaryAuthorization(AttachedClusterBinaryAuthorizationArgs binary } /** - * @param deletionPolicy Policy to determine what flags to send on delete. + * @param deletionPolicy Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS * * @return builder * @@ -398,7 +398,7 @@ public Builder deletionPolicy(@Nullable Output deletionPolicy) { } /** - * @param deletionPolicy Policy to determine what flags to send on delete. + * @param deletionPolicy Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/AttachedClusterState.java b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/AttachedClusterState.java index 2d3a902ee7..4ac7c8bf1a 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/AttachedClusterState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/AttachedClusterState.java @@ -117,14 +117,14 @@ public Optional> createTime() { } /** - * Policy to determine what flags to send on delete. + * Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS * */ @Import(name="deletionPolicy") private @Nullable Output deletionPolicy; /** - * @return Policy to determine what flags to send on delete. + * @return Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS * */ public Optional> deletionPolicy() { @@ -600,7 +600,7 @@ public Builder createTime(String createTime) { } /** - * @param deletionPolicy Policy to determine what flags to send on delete. + * @param deletionPolicy Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS * * @return builder * @@ -611,7 +611,7 @@ public Builder deletionPolicy(@Nullable Output deletionPolicy) { } /** - * @param deletionPolicy Policy to determine what flags to send on delete. + * @param deletionPolicy Policy to determine what flags to send on delete. 
Possible values: DELETE, DELETE_IGNORE_ERRORS * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodeConfigKubeletConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodeConfigKubeletConfigArgs.java index ba8f2cd8de..3fca304105 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodeConfigKubeletConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodeConfigKubeletConfigArgs.java @@ -85,6 +85,21 @@ public Output cpuManagerPolicy() { return this.cpuManagerPolicy; } + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + @Import(name="insecureKubeletReadonlyPortEnabled") + private @Nullable Output insecureKubeletReadonlyPortEnabled; + + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public Optional> insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } + /** * Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. * @@ -106,6 +121,7 @@ private ClusterNodeConfigKubeletConfigArgs(ClusterNodeConfigKubeletConfigArgs $) this.cpuCfsQuota = $.cpuCfsQuota; this.cpuCfsQuotaPeriod = $.cpuCfsQuotaPeriod; this.cpuManagerPolicy = $.cpuManagerPolicy; + this.insecureKubeletReadonlyPortEnabled = $.insecureKubeletReadonlyPortEnabled; this.podPidsLimit = $.podPidsLimit; } @@ -212,6 +228,27 @@ public Builder cpuManagerPolicy(String cpuManagerPolicy) { return cpuManagerPolicy(Output.of(cpuManagerPolicy)); } + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(@Nullable Output insecureKubeletReadonlyPortEnabled) { + $.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(String insecureKubeletReadonlyPortEnabled) { + return insecureKubeletReadonlyPortEnabled(Output.of(insecureKubeletReadonlyPortEnabled)); + } + /** * @param podPidsLimit Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. 
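The new `insecureKubeletReadonlyPortEnabled` input sits alongside the existing kubelet settings and takes the string values `TRUE`/`FALSE`. A minimal sketch of constructing the args type with the read-only port disabled; the `cpuManagerPolicy` value of `"static"` is an assumed example for the pre-existing field, not something introduced by this change:

import com.pulumi.gcp.container.inputs.ClusterNodeConfigKubeletConfigArgs;

public final class KubeletReadonlyPortSketch {
    // Node-config kubelet settings with the insecure read-only port turned off.
    static ClusterNodeConfigKubeletConfigArgs kubeletConfig() {
        return ClusterNodeConfigKubeletConfigArgs.builder()
            .cpuManagerPolicy("static")                  // existing field; example value
            .insecureKubeletReadonlyPortEnabled("FALSE") // new field
            .build();
    }
}

The node-pool variants (`ClusterNodePoolNodeConfigKubeletConfigArgs`, `NodePoolNodeConfigKubeletConfigArgs`) gain the same String and Output overloads, so the same call shape applies there.
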
* diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolAutoConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolAutoConfigArgs.java index 3323ea2074..c0c2dffdb1 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolAutoConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolAutoConfigArgs.java @@ -6,6 +6,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.gcp.container.inputs.ClusterNodePoolAutoConfigNetworkTagsArgs; +import com.pulumi.gcp.container.inputs.ClusterNodePoolAutoConfigNodeKubeletConfigArgs; import java.lang.String; import java.util.Map; import java.util.Objects; @@ -18,20 +19,37 @@ public final class ClusterNodePoolAutoConfigArgs extends com.pulumi.resources.Re public static final ClusterNodePoolAutoConfigArgs Empty = new ClusterNodePoolAutoConfigArgs(); /** - * The network tag config for the cluster's automatically provisioned node pools. + * The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. * */ @Import(name="networkTags") private @Nullable Output networkTags; /** - * @return The network tag config for the cluster's automatically provisioned node pools. + * @return The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. * */ public Optional> networkTags() { return Optional.ofNullable(this.networkTags); } + /** + * Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + * Structure is documented below. + * + */ + @Import(name="nodeKubeletConfig") + private @Nullable Output nodeKubeletConfig; + + /** + * @return Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + * Structure is documented below. + * + */ + public Optional> nodeKubeletConfig() { + return Optional.ofNullable(this.nodeKubeletConfig); + } + /** * A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. * @@ -51,6 +69,7 @@ private ClusterNodePoolAutoConfigArgs() {} private ClusterNodePoolAutoConfigArgs(ClusterNodePoolAutoConfigArgs $) { this.networkTags = $.networkTags; + this.nodeKubeletConfig = $.nodeKubeletConfig; this.resourceManagerTags = $.resourceManagerTags; } @@ -73,7 +92,7 @@ public Builder(ClusterNodePoolAutoConfigArgs defaults) { } /** - * @param networkTags The network tag config for the cluster's automatically provisioned node pools. + * @param networkTags The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. * * @return builder * @@ -84,7 +103,7 @@ public Builder networkTags(@Nullable Output nodeKubeletConfig) { + $.nodeKubeletConfig = nodeKubeletConfig; + return this; + } + + /** + * @param nodeKubeletConfig Kubelet configuration for Autopilot clusters. 
Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + * Structure is documented below. + * + * @return builder + * + */ + public Builder nodeKubeletConfig(ClusterNodePoolAutoConfigNodeKubeletConfigArgs nodeKubeletConfig) { + return nodeKubeletConfig(Output.of(nodeKubeletConfig)); + } + /** * @param resourceManagerTags A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolAutoConfigNodeKubeletConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolAutoConfigNodeKubeletConfigArgs.java new file mode 100644 index 0000000000..42c9bdd3e1 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolAutoConfigNodeKubeletConfigArgs.java @@ -0,0 +1,83 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.container.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ClusterNodePoolAutoConfigNodeKubeletConfigArgs extends com.pulumi.resources.ResourceArgs { + + public static final ClusterNodePoolAutoConfigNodeKubeletConfigArgs Empty = new ClusterNodePoolAutoConfigNodeKubeletConfigArgs(); + + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + @Import(name="insecureKubeletReadonlyPortEnabled") + private @Nullable Output insecureKubeletReadonlyPortEnabled; + + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public Optional> insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } + + private ClusterNodePoolAutoConfigNodeKubeletConfigArgs() {} + + private ClusterNodePoolAutoConfigNodeKubeletConfigArgs(ClusterNodePoolAutoConfigNodeKubeletConfigArgs $) { + this.insecureKubeletReadonlyPortEnabled = $.insecureKubeletReadonlyPortEnabled; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ClusterNodePoolAutoConfigNodeKubeletConfigArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ClusterNodePoolAutoConfigNodeKubeletConfigArgs $; + + public Builder() { + $ = new ClusterNodePoolAutoConfigNodeKubeletConfigArgs(); + } + + public Builder(ClusterNodePoolAutoConfigNodeKubeletConfigArgs defaults) { + $ = new ClusterNodePoolAutoConfigNodeKubeletConfigArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. 
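For Autopilot clusters the same toggle is exposed through the new `nodeKubeletConfig` block on `ClusterNodePoolAutoConfigArgs`. A minimal sketch of building that input; attaching it to a cluster through the existing `nodePoolAutoConfig` property is assumed from the current API and not shown here:

import com.pulumi.gcp.container.inputs.ClusterNodePoolAutoConfigArgs;
import com.pulumi.gcp.container.inputs.ClusterNodePoolAutoConfigNodeKubeletConfigArgs;

public final class AutopilotKubeletSketch {
    // Auto-config for automatically provisioned node pools, with the kubelet
    // read-only port disabled via the new nodeKubeletConfig block.
    static ClusterNodePoolAutoConfigArgs autoConfig() {
        return ClusterNodePoolAutoConfigArgs.builder()
            .nodeKubeletConfig(ClusterNodePoolAutoConfigNodeKubeletConfigArgs.builder()
                .insecureKubeletReadonlyPortEnabled("FALSE")
                .build())
            .build();
    }
}
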
Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(@Nullable Output insecureKubeletReadonlyPortEnabled) { + $.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(String insecureKubeletReadonlyPortEnabled) { + return insecureKubeletReadonlyPortEnabled(Output.of(insecureKubeletReadonlyPortEnabled)); + } + + public ClusterNodePoolAutoConfigNodeKubeletConfigArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolDefaultsNodeConfigDefaultsArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolDefaultsNodeConfigDefaultsArgs.java index dcc892cf5a..e5aa896f7b 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolDefaultsNodeConfigDefaultsArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolDefaultsNodeConfigDefaultsArgs.java @@ -47,6 +47,21 @@ public Optional> return Optional.ofNullable(this.gcfsConfig); } + /** + * Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + @Import(name="insecureKubeletReadonlyPortEnabled") + private @Nullable Output insecureKubeletReadonlyPortEnabled; + + /** + * @return Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public Optional> insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } + /** * The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. * @@ -67,6 +82,7 @@ private ClusterNodePoolDefaultsNodeConfigDefaultsArgs() {} private ClusterNodePoolDefaultsNodeConfigDefaultsArgs(ClusterNodePoolDefaultsNodeConfigDefaultsArgs $) { this.containerdConfig = $.containerdConfig; this.gcfsConfig = $.gcfsConfig; + this.insecureKubeletReadonlyPortEnabled = $.insecureKubeletReadonlyPortEnabled; this.loggingVariant = $.loggingVariant; } @@ -130,6 +146,27 @@ public Builder gcfsConfig(ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArg return gcfsConfig(Output.of(gcfsConfig)); } + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(@Nullable Output insecureKubeletReadonlyPortEnabled) { + $.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. 
Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(String insecureKubeletReadonlyPortEnabled) { + return insecureKubeletReadonlyPortEnabled(Output.of(insecureKubeletReadonlyPortEnabled)); + } + /** * @param loggingVariant The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolNodeConfigKubeletConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolNodeConfigKubeletConfigArgs.java index eff680cfb2..e34cd3e985 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolNodeConfigKubeletConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/ClusterNodePoolNodeConfigKubeletConfigArgs.java @@ -85,6 +85,21 @@ public Output cpuManagerPolicy() { return this.cpuManagerPolicy; } + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + @Import(name="insecureKubeletReadonlyPortEnabled") + private @Nullable Output insecureKubeletReadonlyPortEnabled; + + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public Optional> insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } + /** * Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. * @@ -106,6 +121,7 @@ private ClusterNodePoolNodeConfigKubeletConfigArgs(ClusterNodePoolNodeConfigKube this.cpuCfsQuota = $.cpuCfsQuota; this.cpuCfsQuotaPeriod = $.cpuCfsQuotaPeriod; this.cpuManagerPolicy = $.cpuManagerPolicy; + this.insecureKubeletReadonlyPortEnabled = $.insecureKubeletReadonlyPortEnabled; this.podPidsLimit = $.podPidsLimit; } @@ -212,6 +228,27 @@ public Builder cpuManagerPolicy(String cpuManagerPolicy) { return cpuManagerPolicy(Output.of(cpuManagerPolicy)); } + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(@Nullable Output insecureKubeletReadonlyPortEnabled) { + $.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(String insecureKubeletReadonlyPortEnabled) { + return insecureKubeletReadonlyPortEnabled(Output.of(insecureKubeletReadonlyPortEnabled)); + } + /** * @param podPidsLimit Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. 
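The cluster-wide counterpart lands on `ClusterNodePoolDefaultsNodeConfigDefaultsArgs`, so newly created node pools can inherit the setting. A minimal sketch, assuming the defaults block is attached to the cluster through the existing `nodePoolDefaults`/`nodeConfigDefaults` nesting:

import com.pulumi.gcp.container.inputs.ClusterNodePoolDefaultsNodeConfigDefaultsArgs;

public final class NodeConfigDefaultsSketch {
    // Default applied to node pools created after this setting is in place:
    // the kubelet read-only port starts out disabled.
    static ClusterNodePoolDefaultsNodeConfigDefaultsArgs nodeConfigDefaults() {
        return ClusterNodePoolDefaultsNodeConfigDefaultsArgs.builder()
            .insecureKubeletReadonlyPortEnabled("FALSE")
            .build();
    }
}
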
* diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/NodePoolNodeConfigKubeletConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/NodePoolNodeConfigKubeletConfigArgs.java index 18a2f84366..4e3bb35fc7 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/NodePoolNodeConfigKubeletConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/inputs/NodePoolNodeConfigKubeletConfigArgs.java @@ -63,6 +63,21 @@ public Output cpuManagerPolicy() { return this.cpuManagerPolicy; } + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + @Import(name="insecureKubeletReadonlyPortEnabled") + private @Nullable Output insecureKubeletReadonlyPortEnabled; + + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public Optional> insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } + /** * Controls the maximum number of processes allowed to run in a pod. * @@ -84,6 +99,7 @@ private NodePoolNodeConfigKubeletConfigArgs(NodePoolNodeConfigKubeletConfigArgs this.cpuCfsQuota = $.cpuCfsQuota; this.cpuCfsQuotaPeriod = $.cpuCfsQuotaPeriod; this.cpuManagerPolicy = $.cpuManagerPolicy; + this.insecureKubeletReadonlyPortEnabled = $.insecureKubeletReadonlyPortEnabled; this.podPidsLimit = $.podPidsLimit; } @@ -168,6 +184,27 @@ public Builder cpuManagerPolicy(String cpuManagerPolicy) { return cpuManagerPolicy(Output.of(cpuManagerPolicy)); } + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(@Nullable Output insecureKubeletReadonlyPortEnabled) { + $.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + + /** + * @param insecureKubeletReadonlyPortEnabled Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + * @return builder + * + */ + public Builder insecureKubeletReadonlyPortEnabled(String insecureKubeletReadonlyPortEnabled) { + return insecureKubeletReadonlyPortEnabled(Output.of(insecureKubeletReadonlyPortEnabled)); + } + /** * @param podPidsLimit Controls the maximum number of processes allowed to run in a pod. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodeConfigKubeletConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodeConfigKubeletConfig.java index d985d24dc2..89c19f86db 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodeConfigKubeletConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodeConfigKubeletConfig.java @@ -40,6 +40,11 @@ public final class ClusterNodeConfigKubeletConfig { * */ private String cpuManagerPolicy; + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + private @Nullable String insecureKubeletReadonlyPortEnabled; /** * @return Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. 
* @@ -79,6 +84,13 @@ public Optional cpuCfsQuotaPeriod() { public String cpuManagerPolicy() { return this.cpuManagerPolicy; } + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public Optional insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } /** * @return Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. * @@ -99,6 +111,7 @@ public static final class Builder { private @Nullable Boolean cpuCfsQuota; private @Nullable String cpuCfsQuotaPeriod; private String cpuManagerPolicy; + private @Nullable String insecureKubeletReadonlyPortEnabled; private @Nullable Integer podPidsLimit; public Builder() {} public Builder(ClusterNodeConfigKubeletConfig defaults) { @@ -106,6 +119,7 @@ public Builder(ClusterNodeConfigKubeletConfig defaults) { this.cpuCfsQuota = defaults.cpuCfsQuota; this.cpuCfsQuotaPeriod = defaults.cpuCfsQuotaPeriod; this.cpuManagerPolicy = defaults.cpuManagerPolicy; + this.insecureKubeletReadonlyPortEnabled = defaults.insecureKubeletReadonlyPortEnabled; this.podPidsLimit = defaults.podPidsLimit; } @@ -130,6 +144,12 @@ public Builder cpuManagerPolicy(String cpuManagerPolicy) { return this; } @CustomType.Setter + public Builder insecureKubeletReadonlyPortEnabled(@Nullable String insecureKubeletReadonlyPortEnabled) { + + this.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + @CustomType.Setter public Builder podPidsLimit(@Nullable Integer podPidsLimit) { this.podPidsLimit = podPidsLimit; @@ -140,6 +160,7 @@ public ClusterNodeConfigKubeletConfig build() { _resultValue.cpuCfsQuota = cpuCfsQuota; _resultValue.cpuCfsQuotaPeriod = cpuCfsQuotaPeriod; _resultValue.cpuManagerPolicy = cpuManagerPolicy; + _resultValue.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; _resultValue.podPidsLimit = podPidsLimit; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolAutoConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolAutoConfig.java index e263249a9b..cd80fdedc5 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolAutoConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolAutoConfig.java @@ -5,6 +5,7 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.gcp.container.outputs.ClusterNodePoolAutoConfigNetworkTags; +import com.pulumi.gcp.container.outputs.ClusterNodePoolAutoConfigNodeKubeletConfig; import java.lang.String; import java.util.Map; import java.util.Objects; @@ -14,10 +15,16 @@ @CustomType public final class ClusterNodePoolAutoConfig { /** - * @return The network tag config for the cluster's automatically provisioned node pools. + * @return The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. * */ private @Nullable ClusterNodePoolAutoConfigNetworkTags networkTags; + /** + * @return Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + * Structure is documented below. 
+ * + */ + private @Nullable ClusterNodePoolAutoConfigNodeKubeletConfig nodeKubeletConfig; /** * @return A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. * @@ -26,12 +33,20 @@ public final class ClusterNodePoolAutoConfig { private ClusterNodePoolAutoConfig() {} /** - * @return The network tag config for the cluster's automatically provisioned node pools. + * @return The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. * */ public Optional networkTags() { return Optional.ofNullable(this.networkTags); } + /** + * @return Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + * Structure is documented below. + * + */ + public Optional nodeKubeletConfig() { + return Optional.ofNullable(this.nodeKubeletConfig); + } /** * @return A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. 
* @@ -50,11 +65,13 @@ public static Builder builder(ClusterNodePoolAutoConfig defaults) { @CustomType.Builder public static final class Builder { private @Nullable ClusterNodePoolAutoConfigNetworkTags networkTags; + private @Nullable ClusterNodePoolAutoConfigNodeKubeletConfig nodeKubeletConfig; private @Nullable Map resourceManagerTags; public Builder() {} public Builder(ClusterNodePoolAutoConfig defaults) { Objects.requireNonNull(defaults); this.networkTags = defaults.networkTags; + this.nodeKubeletConfig = defaults.nodeKubeletConfig; this.resourceManagerTags = defaults.resourceManagerTags; } @@ -65,6 +82,12 @@ public Builder networkTags(@Nullable ClusterNodePoolAutoConfigNetworkTags networ return this; } @CustomType.Setter + public Builder nodeKubeletConfig(@Nullable ClusterNodePoolAutoConfigNodeKubeletConfig nodeKubeletConfig) { + + this.nodeKubeletConfig = nodeKubeletConfig; + return this; + } + @CustomType.Setter public Builder resourceManagerTags(@Nullable Map resourceManagerTags) { this.resourceManagerTags = resourceManagerTags; @@ -73,6 +96,7 @@ public Builder resourceManagerTags(@Nullable Map resourceManagerT public ClusterNodePoolAutoConfig build() { final var _resultValue = new ClusterNodePoolAutoConfig(); _resultValue.networkTags = networkTags; + _resultValue.nodeKubeletConfig = nodeKubeletConfig; _resultValue.resourceManagerTags = resourceManagerTags; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolAutoConfigNodeKubeletConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolAutoConfigNodeKubeletConfig.java new file mode 100644 index 0000000000..3bd1dc5a8a --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolAutoConfigNodeKubeletConfig.java @@ -0,0 +1,57 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.container.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ClusterNodePoolAutoConfigNodeKubeletConfig { + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + private @Nullable String insecureKubeletReadonlyPortEnabled; + + private ClusterNodePoolAutoConfigNodeKubeletConfig() {} + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ * + */ + public Optional insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ClusterNodePoolAutoConfigNodeKubeletConfig defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String insecureKubeletReadonlyPortEnabled; + public Builder() {} + public Builder(ClusterNodePoolAutoConfigNodeKubeletConfig defaults) { + Objects.requireNonNull(defaults); + this.insecureKubeletReadonlyPortEnabled = defaults.insecureKubeletReadonlyPortEnabled; + } + + @CustomType.Setter + public Builder insecureKubeletReadonlyPortEnabled(@Nullable String insecureKubeletReadonlyPortEnabled) { + + this.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + public ClusterNodePoolAutoConfigNodeKubeletConfig build() { + final var _resultValue = new ClusterNodePoolAutoConfigNodeKubeletConfig(); + _resultValue.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolDefaultsNodeConfigDefaults.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolDefaultsNodeConfigDefaults.java index 47165b502f..5ebb7f1b4d 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolDefaultsNodeConfigDefaults.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolDefaultsNodeConfigDefaults.java @@ -23,6 +23,11 @@ public final class ClusterNodePoolDefaultsNodeConfigDefaults { * */ private @Nullable ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig gcfsConfig; + /** + * @return Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + private @Nullable String insecureKubeletReadonlyPortEnabled; /** * @return The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. * @@ -44,6 +49,13 @@ public Optional conta public Optional gcfsConfig() { return Optional.ofNullable(this.gcfsConfig); } + /** + * @return Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public Optional insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } /** * @return The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. 
* @@ -63,12 +75,14 @@ public static Builder builder(ClusterNodePoolDefaultsNodeConfigDefaults defaults public static final class Builder { private @Nullable ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfig containerdConfig; private @Nullable ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig gcfsConfig; + private @Nullable String insecureKubeletReadonlyPortEnabled; private @Nullable String loggingVariant; public Builder() {} public Builder(ClusterNodePoolDefaultsNodeConfigDefaults defaults) { Objects.requireNonNull(defaults); this.containerdConfig = defaults.containerdConfig; this.gcfsConfig = defaults.gcfsConfig; + this.insecureKubeletReadonlyPortEnabled = defaults.insecureKubeletReadonlyPortEnabled; this.loggingVariant = defaults.loggingVariant; } @@ -85,6 +99,12 @@ public Builder gcfsConfig(@Nullable ClusterNodePoolDefaultsNodeConfigDefaultsGcf return this; } @CustomType.Setter + public Builder insecureKubeletReadonlyPortEnabled(@Nullable String insecureKubeletReadonlyPortEnabled) { + + this.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + @CustomType.Setter public Builder loggingVariant(@Nullable String loggingVariant) { this.loggingVariant = loggingVariant; @@ -94,6 +114,7 @@ public ClusterNodePoolDefaultsNodeConfigDefaults build() { final var _resultValue = new ClusterNodePoolDefaultsNodeConfigDefaults(); _resultValue.containerdConfig = containerdConfig; _resultValue.gcfsConfig = gcfsConfig; + _resultValue.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; _resultValue.loggingVariant = loggingVariant; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolNodeConfigKubeletConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolNodeConfigKubeletConfig.java index 840e3b58ae..f7e93dff30 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolNodeConfigKubeletConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/ClusterNodePoolNodeConfigKubeletConfig.java @@ -40,6 +40,11 @@ public final class ClusterNodePoolNodeConfigKubeletConfig { * */ private String cpuManagerPolicy; + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + private @Nullable String insecureKubeletReadonlyPortEnabled; /** * @return Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. * @@ -79,6 +84,13 @@ public Optional cpuCfsQuotaPeriod() { public String cpuManagerPolicy() { return this.cpuManagerPolicy; } + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public Optional insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } /** * @return Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. 
* @@ -99,6 +111,7 @@ public static final class Builder { private @Nullable Boolean cpuCfsQuota; private @Nullable String cpuCfsQuotaPeriod; private String cpuManagerPolicy; + private @Nullable String insecureKubeletReadonlyPortEnabled; private @Nullable Integer podPidsLimit; public Builder() {} public Builder(ClusterNodePoolNodeConfigKubeletConfig defaults) { @@ -106,6 +119,7 @@ public Builder(ClusterNodePoolNodeConfigKubeletConfig defaults) { this.cpuCfsQuota = defaults.cpuCfsQuota; this.cpuCfsQuotaPeriod = defaults.cpuCfsQuotaPeriod; this.cpuManagerPolicy = defaults.cpuManagerPolicy; + this.insecureKubeletReadonlyPortEnabled = defaults.insecureKubeletReadonlyPortEnabled; this.podPidsLimit = defaults.podPidsLimit; } @@ -130,6 +144,12 @@ public Builder cpuManagerPolicy(String cpuManagerPolicy) { return this; } @CustomType.Setter + public Builder insecureKubeletReadonlyPortEnabled(@Nullable String insecureKubeletReadonlyPortEnabled) { + + this.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + @CustomType.Setter public Builder podPidsLimit(@Nullable Integer podPidsLimit) { this.podPidsLimit = podPidsLimit; @@ -140,6 +160,7 @@ public ClusterNodePoolNodeConfigKubeletConfig build() { _resultValue.cpuCfsQuota = cpuCfsQuota; _resultValue.cpuCfsQuotaPeriod = cpuCfsQuotaPeriod; _resultValue.cpuManagerPolicy = cpuManagerPolicy; + _resultValue.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; _resultValue.podPidsLimit = podPidsLimit; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodeConfigKubeletConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodeConfigKubeletConfig.java index c8f05a04e6..b547ed7c34 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodeConfigKubeletConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodeConfigKubeletConfig.java @@ -27,6 +27,11 @@ public final class GetClusterNodeConfigKubeletConfig { * */ private String cpuManagerPolicy; + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + private String insecureKubeletReadonlyPortEnabled; /** * @return Controls the maximum number of processes allowed to run in a pod. * @@ -55,6 +60,13 @@ public String cpuCfsQuotaPeriod() { public String cpuManagerPolicy() { return this.cpuManagerPolicy; } + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public String insecureKubeletReadonlyPortEnabled() { + return this.insecureKubeletReadonlyPortEnabled; + } /** * @return Controls the maximum number of processes allowed to run in a pod. 
* @@ -75,6 +87,7 @@ public static final class Builder { private Boolean cpuCfsQuota; private String cpuCfsQuotaPeriod; private String cpuManagerPolicy; + private String insecureKubeletReadonlyPortEnabled; private Integer podPidsLimit; public Builder() {} public Builder(GetClusterNodeConfigKubeletConfig defaults) { @@ -82,6 +95,7 @@ public Builder(GetClusterNodeConfigKubeletConfig defaults) { this.cpuCfsQuota = defaults.cpuCfsQuota; this.cpuCfsQuotaPeriod = defaults.cpuCfsQuotaPeriod; this.cpuManagerPolicy = defaults.cpuManagerPolicy; + this.insecureKubeletReadonlyPortEnabled = defaults.insecureKubeletReadonlyPortEnabled; this.podPidsLimit = defaults.podPidsLimit; } @@ -110,6 +124,14 @@ public Builder cpuManagerPolicy(String cpuManagerPolicy) { return this; } @CustomType.Setter + public Builder insecureKubeletReadonlyPortEnabled(String insecureKubeletReadonlyPortEnabled) { + if (insecureKubeletReadonlyPortEnabled == null) { + throw new MissingRequiredPropertyException("GetClusterNodeConfigKubeletConfig", "insecureKubeletReadonlyPortEnabled"); + } + this.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + @CustomType.Setter public Builder podPidsLimit(Integer podPidsLimit) { if (podPidsLimit == null) { throw new MissingRequiredPropertyException("GetClusterNodeConfigKubeletConfig", "podPidsLimit"); @@ -122,6 +144,7 @@ public GetClusterNodeConfigKubeletConfig build() { _resultValue.cpuCfsQuota = cpuCfsQuota; _resultValue.cpuCfsQuotaPeriod = cpuCfsQuotaPeriod; _resultValue.cpuManagerPolicy = cpuManagerPolicy; + _resultValue.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; _resultValue.podPidsLimit = podPidsLimit; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolAutoConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolAutoConfig.java index c793c8a9bf..c6b25d6fb1 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolAutoConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolAutoConfig.java @@ -6,6 +6,7 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.exceptions.MissingRequiredPropertyException; import com.pulumi.gcp.container.outputs.GetClusterNodePoolAutoConfigNetworkTag; +import com.pulumi.gcp.container.outputs.GetClusterNodePoolAutoConfigNodeKubeletConfig; import java.lang.String; import java.util.List; import java.util.Map; @@ -18,6 +19,11 @@ public final class GetClusterNodePoolAutoConfig { * */ private List networkTags; + /** + * @return Node kubelet configs. + * + */ + private List nodeKubeletConfigs; /** * @return A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. * @@ -32,6 +38,13 @@ private GetClusterNodePoolAutoConfig() {} public List networkTags() { return this.networkTags; } + /** + * @return Node kubelet configs. + * + */ + public List nodeKubeletConfigs() { + return this.nodeKubeletConfigs; + } /** * @return A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. 
* @@ -50,11 +63,13 @@ public static Builder builder(GetClusterNodePoolAutoConfig defaults) { @CustomType.Builder public static final class Builder { private List networkTags; + private List nodeKubeletConfigs; private Map resourceManagerTags; public Builder() {} public Builder(GetClusterNodePoolAutoConfig defaults) { Objects.requireNonNull(defaults); this.networkTags = defaults.networkTags; + this.nodeKubeletConfigs = defaults.nodeKubeletConfigs; this.resourceManagerTags = defaults.resourceManagerTags; } @@ -70,6 +85,17 @@ public Builder networkTags(GetClusterNodePoolAutoConfigNetworkTag... networkTags return networkTags(List.of(networkTags)); } @CustomType.Setter + public Builder nodeKubeletConfigs(List nodeKubeletConfigs) { + if (nodeKubeletConfigs == null) { + throw new MissingRequiredPropertyException("GetClusterNodePoolAutoConfig", "nodeKubeletConfigs"); + } + this.nodeKubeletConfigs = nodeKubeletConfigs; + return this; + } + public Builder nodeKubeletConfigs(GetClusterNodePoolAutoConfigNodeKubeletConfig... nodeKubeletConfigs) { + return nodeKubeletConfigs(List.of(nodeKubeletConfigs)); + } + @CustomType.Setter public Builder resourceManagerTags(Map resourceManagerTags) { if (resourceManagerTags == null) { throw new MissingRequiredPropertyException("GetClusterNodePoolAutoConfig", "resourceManagerTags"); @@ -80,6 +106,7 @@ public Builder resourceManagerTags(Map resourceManagerTags) { public GetClusterNodePoolAutoConfig build() { final var _resultValue = new GetClusterNodePoolAutoConfig(); _resultValue.networkTags = networkTags; + _resultValue.nodeKubeletConfigs = nodeKubeletConfigs; _resultValue.resourceManagerTags = resourceManagerTags; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolAutoConfigNodeKubeletConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolAutoConfigNodeKubeletConfig.java new file mode 100644 index 0000000000..3762995524 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolAutoConfigNodeKubeletConfig.java @@ -0,0 +1,58 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.container.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetClusterNodePoolAutoConfigNodeKubeletConfig { + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + private String insecureKubeletReadonlyPortEnabled; + + private GetClusterNodePoolAutoConfigNodeKubeletConfig() {} + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ * + */ + public String insecureKubeletReadonlyPortEnabled() { + return this.insecureKubeletReadonlyPortEnabled; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetClusterNodePoolAutoConfigNodeKubeletConfig defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String insecureKubeletReadonlyPortEnabled; + public Builder() {} + public Builder(GetClusterNodePoolAutoConfigNodeKubeletConfig defaults) { + Objects.requireNonNull(defaults); + this.insecureKubeletReadonlyPortEnabled = defaults.insecureKubeletReadonlyPortEnabled; + } + + @CustomType.Setter + public Builder insecureKubeletReadonlyPortEnabled(String insecureKubeletReadonlyPortEnabled) { + if (insecureKubeletReadonlyPortEnabled == null) { + throw new MissingRequiredPropertyException("GetClusterNodePoolAutoConfigNodeKubeletConfig", "insecureKubeletReadonlyPortEnabled"); + } + this.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + public GetClusterNodePoolAutoConfigNodeKubeletConfig build() { + final var _resultValue = new GetClusterNodePoolAutoConfigNodeKubeletConfig(); + _resultValue.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolDefaultNodeConfigDefault.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolDefaultNodeConfigDefault.java index e5ebf972fc..4edb1f1417 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolDefaultNodeConfigDefault.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolDefaultNodeConfigDefault.java @@ -23,6 +23,11 @@ public final class GetClusterNodePoolDefaultNodeConfigDefault { * */ private List gcfsConfigs; + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + private String insecureKubeletReadonlyPortEnabled; /** * @return Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. * @@ -44,6 +49,13 @@ public List containe public List gcfsConfigs() { return this.gcfsConfigs; } + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public String insecureKubeletReadonlyPortEnabled() { + return this.insecureKubeletReadonlyPortEnabled; + } /** * @return Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. * @@ -63,12 +75,14 @@ public static Builder builder(GetClusterNodePoolDefaultNodeConfigDefault default public static final class Builder { private List containerdConfigs; private List gcfsConfigs; + private String insecureKubeletReadonlyPortEnabled; private String loggingVariant; public Builder() {} public Builder(GetClusterNodePoolDefaultNodeConfigDefault defaults) { Objects.requireNonNull(defaults); this.containerdConfigs = defaults.containerdConfigs; this.gcfsConfigs = defaults.gcfsConfigs; + this.insecureKubeletReadonlyPortEnabled = defaults.insecureKubeletReadonlyPortEnabled; this.loggingVariant = defaults.loggingVariant; } @@ -95,6 +109,14 @@ public Builder gcfsConfigs(GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfig. 
return gcfsConfigs(List.of(gcfsConfigs)); } @CustomType.Setter + public Builder insecureKubeletReadonlyPortEnabled(String insecureKubeletReadonlyPortEnabled) { + if (insecureKubeletReadonlyPortEnabled == null) { + throw new MissingRequiredPropertyException("GetClusterNodePoolDefaultNodeConfigDefault", "insecureKubeletReadonlyPortEnabled"); + } + this.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + @CustomType.Setter public Builder loggingVariant(String loggingVariant) { if (loggingVariant == null) { throw new MissingRequiredPropertyException("GetClusterNodePoolDefaultNodeConfigDefault", "loggingVariant"); @@ -106,6 +128,7 @@ public GetClusterNodePoolDefaultNodeConfigDefault build() { final var _resultValue = new GetClusterNodePoolDefaultNodeConfigDefault(); _resultValue.containerdConfigs = containerdConfigs; _resultValue.gcfsConfigs = gcfsConfigs; + _resultValue.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; _resultValue.loggingVariant = loggingVariant; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolNodeConfigKubeletConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolNodeConfigKubeletConfig.java index 0c399822f4..9a0afa7c3f 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolNodeConfigKubeletConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/GetClusterNodePoolNodeConfigKubeletConfig.java @@ -27,6 +27,11 @@ public final class GetClusterNodePoolNodeConfigKubeletConfig { * */ private String cpuManagerPolicy; + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + private String insecureKubeletReadonlyPortEnabled; /** * @return Controls the maximum number of processes allowed to run in a pod. * @@ -55,6 +60,13 @@ public String cpuCfsQuotaPeriod() { public String cpuManagerPolicy() { return this.cpuManagerPolicy; } + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public String insecureKubeletReadonlyPortEnabled() { + return this.insecureKubeletReadonlyPortEnabled; + } /** * @return Controls the maximum number of processes allowed to run in a pod. 
* @@ -75,6 +87,7 @@ public static final class Builder { private Boolean cpuCfsQuota; private String cpuCfsQuotaPeriod; private String cpuManagerPolicy; + private String insecureKubeletReadonlyPortEnabled; private Integer podPidsLimit; public Builder() {} public Builder(GetClusterNodePoolNodeConfigKubeletConfig defaults) { @@ -82,6 +95,7 @@ public Builder(GetClusterNodePoolNodeConfigKubeletConfig defaults) { this.cpuCfsQuota = defaults.cpuCfsQuota; this.cpuCfsQuotaPeriod = defaults.cpuCfsQuotaPeriod; this.cpuManagerPolicy = defaults.cpuManagerPolicy; + this.insecureKubeletReadonlyPortEnabled = defaults.insecureKubeletReadonlyPortEnabled; this.podPidsLimit = defaults.podPidsLimit; } @@ -110,6 +124,14 @@ public Builder cpuManagerPolicy(String cpuManagerPolicy) { return this; } @CustomType.Setter + public Builder insecureKubeletReadonlyPortEnabled(String insecureKubeletReadonlyPortEnabled) { + if (insecureKubeletReadonlyPortEnabled == null) { + throw new MissingRequiredPropertyException("GetClusterNodePoolNodeConfigKubeletConfig", "insecureKubeletReadonlyPortEnabled"); + } + this.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + @CustomType.Setter public Builder podPidsLimit(Integer podPidsLimit) { if (podPidsLimit == null) { throw new MissingRequiredPropertyException("GetClusterNodePoolNodeConfigKubeletConfig", "podPidsLimit"); @@ -122,6 +144,7 @@ public GetClusterNodePoolNodeConfigKubeletConfig build() { _resultValue.cpuCfsQuota = cpuCfsQuota; _resultValue.cpuCfsQuotaPeriod = cpuCfsQuotaPeriod; _resultValue.cpuManagerPolicy = cpuManagerPolicy; + _resultValue.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; _resultValue.podPidsLimit = podPidsLimit; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/NodePoolNodeConfigKubeletConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/NodePoolNodeConfigKubeletConfig.java index 6526255d8f..a08e86710e 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/NodePoolNodeConfigKubeletConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/container/outputs/NodePoolNodeConfigKubeletConfig.java @@ -29,6 +29,11 @@ public final class NodePoolNodeConfigKubeletConfig { * */ private String cpuManagerPolicy; + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + private @Nullable String insecureKubeletReadonlyPortEnabled; /** * @return Controls the maximum number of processes allowed to run in a pod. * @@ -57,6 +62,13 @@ public Optional cpuCfsQuotaPeriod() { public String cpuManagerPolicy() { return this.cpuManagerPolicy; } + /** + * @return Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + * + */ + public Optional insecureKubeletReadonlyPortEnabled() { + return Optional.ofNullable(this.insecureKubeletReadonlyPortEnabled); + } /** * @return Controls the maximum number of processes allowed to run in a pod. 
* @@ -77,6 +89,7 @@ public static final class Builder { private @Nullable Boolean cpuCfsQuota; private @Nullable String cpuCfsQuotaPeriod; private String cpuManagerPolicy; + private @Nullable String insecureKubeletReadonlyPortEnabled; private @Nullable Integer podPidsLimit; public Builder() {} public Builder(NodePoolNodeConfigKubeletConfig defaults) { @@ -84,6 +97,7 @@ public Builder(NodePoolNodeConfigKubeletConfig defaults) { this.cpuCfsQuota = defaults.cpuCfsQuota; this.cpuCfsQuotaPeriod = defaults.cpuCfsQuotaPeriod; this.cpuManagerPolicy = defaults.cpuManagerPolicy; + this.insecureKubeletReadonlyPortEnabled = defaults.insecureKubeletReadonlyPortEnabled; this.podPidsLimit = defaults.podPidsLimit; } @@ -108,6 +122,12 @@ public Builder cpuManagerPolicy(String cpuManagerPolicy) { return this; } @CustomType.Setter + public Builder insecureKubeletReadonlyPortEnabled(@Nullable String insecureKubeletReadonlyPortEnabled) { + + this.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; + return this; + } + @CustomType.Setter public Builder podPidsLimit(@Nullable Integer podPidsLimit) { this.podPidsLimit = podPidsLimit; @@ -118,6 +138,7 @@ public NodePoolNodeConfigKubeletConfig build() { _resultValue.cpuCfsQuota = cpuCfsQuota; _resultValue.cpuCfsQuotaPeriod = cpuCfsQuotaPeriod; _resultValue.cpuManagerPolicy = cpuManagerPolicy; + _resultValue.insecureKubeletReadonlyPortEnabled = insecureKubeletReadonlyPortEnabled; _resultValue.podPidsLimit = podPidsLimit; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/ConnectionProfile.java b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/ConnectionProfile.java index 2c99394621..3bff4b79b0 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/ConnectionProfile.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/ConnectionProfile.java @@ -379,6 +379,229 @@ * } * * <!--End PulumiCodeChooser --> + * ### Database Migration Service Connection Profile Existing Mysql + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.organizations.OrganizationsFunctions;
+ * import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
+ * import com.pulumi.gcp.sql.DatabaseInstance;
+ * import com.pulumi.gcp.sql.DatabaseInstanceArgs;
+ * import com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;
+ * import com.pulumi.gcp.databasemigrationservice.ConnectionProfile;
+ * import com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;
+ * import com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileMysqlArgs;
+ * import com.pulumi.resources.CustomResourceOptions;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         final var project = OrganizationsFunctions.getProject();
+ * 
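+ *         // Destination Cloud SQL instance (MYSQL_5_7); deletion protection is disabled
+ *         // on both the resource and its settings.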
+ *         var destinationCsql = new DatabaseInstance("destinationCsql", DatabaseInstanceArgs.builder()
+ *             .name("destination-csql")
+ *             .databaseVersion("MYSQL_5_7")
+ *             .settings(DatabaseInstanceSettingsArgs.builder()
+ *                 .tier("db-n1-standard-1")
+ *                 .deletionProtectionEnabled(false)
+ *                 .build())
+ *             .deletionProtection(false)
+ *             .build());
+ * 
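+ *         // The connection profile references the existing Cloud SQL instance through
+ *         // cloudSqlId; host, port, username and password are optional in this case.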
+ *         var existing_mysql = new ConnectionProfile("existing-mysql", ConnectionProfileArgs.builder()
+ *             .location("us-central1")
+ *             .connectionProfileId("destination-cp")
+ *             .displayName("destination-cp_display")
+ *             .labels(Map.of("foo", "bar"))
+ *             .mysql(ConnectionProfileMysqlArgs.builder()
+ *                 .cloudSqlId("destination-csql")
+ *                 .build())
+ *             .build(), CustomResourceOptions.builder()
+ *                 .dependsOn(destinationCsql)
+ *                 .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * ### Database Migration Service Connection Profile Existing Postgres + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.organizations.OrganizationsFunctions;
+ * import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
+ * import com.pulumi.gcp.sql.DatabaseInstance;
+ * import com.pulumi.gcp.sql.DatabaseInstanceArgs;
+ * import com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;
+ * import com.pulumi.gcp.databasemigrationservice.ConnectionProfile;
+ * import com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;
+ * import com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfilePostgresqlArgs;
+ * import com.pulumi.resources.CustomResourceOptions;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         final var project = OrganizationsFunctions.getProject();
+ * 
+ *         var destinationCsql = new DatabaseInstance("destinationCsql", DatabaseInstanceArgs.builder()
+ *             .name("destination-csql")
+ *             .databaseVersion("POSTGRES_15")
+ *             .settings(DatabaseInstanceSettingsArgs.builder()
+ *                 .tier("db-custom-2-13312")
+ *                 .deletionProtectionEnabled(false)
+ *                 .build())
+ *             .deletionProtection(false)
+ *             .build());
+ * 
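+ *         // As in the MySQL example, cloudSqlId alone identifies the existing instance;
+ *         // dependsOn makes sure the Cloud SQL instance is created first.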
+ *         var existing_psql = new ConnectionProfile("existing-psql", ConnectionProfileArgs.builder()
+ *             .location("us-central1")
+ *             .connectionProfileId("destination-cp")
+ *             .displayName("destination-cp_display")
+ *             .labels(Map.of("foo", "bar"))
+ *             .postgresql(ConnectionProfilePostgresqlArgs.builder()
+ *                 .cloudSqlId("destination-csql")
+ *                 .build())
+ *             .build(), CustomResourceOptions.builder()
+ *                 .dependsOn(destinationCsql)
+ *                 .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * ### Database Migration Service Connection Profile Existing Alloydb + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.organizations.OrganizationsFunctions;
+ * import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
+ * import com.pulumi.gcp.compute.Network;
+ * import com.pulumi.gcp.compute.NetworkArgs;
+ * import com.pulumi.gcp.alloydb.Cluster;
+ * import com.pulumi.gcp.alloydb.ClusterArgs;
+ * import com.pulumi.gcp.alloydb.inputs.ClusterNetworkConfigArgs;
+ * import com.pulumi.gcp.alloydb.inputs.ClusterInitialUserArgs;
+ * import com.pulumi.gcp.compute.GlobalAddress;
+ * import com.pulumi.gcp.compute.GlobalAddressArgs;
+ * import com.pulumi.gcp.servicenetworking.Connection;
+ * import com.pulumi.gcp.servicenetworking.ConnectionArgs;
+ * import com.pulumi.gcp.alloydb.Instance;
+ * import com.pulumi.gcp.alloydb.InstanceArgs;
+ * import com.pulumi.gcp.databasemigrationservice.ConnectionProfile;
+ * import com.pulumi.gcp.databasemigrationservice.ConnectionProfileArgs;
+ * import com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfilePostgresqlArgs;
+ * import com.pulumi.resources.CustomResourceOptions;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         final var project = OrganizationsFunctions.getProject();
+ * 
+ *         var default_ = new Network("default", NetworkArgs.builder()
+ *             .name("destination-alloydb")
+ *             .build());
+ * 
+ *         var destinationAlloydb = new Cluster("destinationAlloydb", ClusterArgs.builder()
+ *             .clusterId("destination-alloydb")
+ *             .location("us-central1")
+ *             .networkConfig(ClusterNetworkConfigArgs.builder()
+ *                 .network(default_.id())
+ *                 .build())
+ *             .databaseVersion("POSTGRES_15")
+ *             .initialUser(ClusterInitialUserArgs.builder()
+ *                 .user("destination-alloydb")
+ *                 .password("destination-alloydb")
+ *                 .build())
+ *             .build());
+ * 
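+ *         // Reserve an internal address range used for private services access (VPC peering).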
+ *         var privateIpAlloc = new GlobalAddress("privateIpAlloc", GlobalAddressArgs.builder()
+ *             .name("destination-alloydb")
+ *             .addressType("INTERNAL")
+ *             .purpose("VPC_PEERING")
+ *             .prefixLength(16)
+ *             .network(default_.id())
+ *             .build());
+ * 
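+ *         // Peer the network with servicenetworking.googleapis.com over the reserved range.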
+ *         var vpcConnection = new Connection("vpcConnection", ConnectionArgs.builder()
+ *             .network(default_.id())
+ *             .service("servicenetworking.googleapis.com")
+ *             .reservedPeeringRanges(privateIpAlloc.name())
+ *             .build());
+ * 
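+ *         // The primary AlloyDB instance is created only after the peering connection exists.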
+ *         var destinationAlloydbPrimary = new Instance("destinationAlloydbPrimary", InstanceArgs.builder()
+ *             .cluster(destinationAlloydb.name())
+ *             .instanceId("destination-alloydb-primary")
+ *             .instanceType("PRIMARY")
+ *             .build(), CustomResourceOptions.builder()
+ *                 .dependsOn(vpcConnection)
+ *                 .build());
+ * 
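+ *         // alloydbClusterId, added in this upgrade, points the profile at the AlloyDB
+ *         // cluster instead of a Cloud SQL instance ID.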
+ *         var existing_alloydb = new ConnectionProfile("existing-alloydb", ConnectionProfileArgs.builder()
+ *             .location("us-central1")
+ *             .connectionProfileId("destination-cp")
+ *             .displayName("destination-cp_display")
+ *             .labels(Map.of("foo", "bar"))
+ *             .postgresql(ConnectionProfilePostgresqlArgs.builder()
+ *                 .alloydbClusterId("destination-alloydb")
+ *                 .build())
+ *             .build(), CustomResourceOptions.builder()
+ *                 .dependsOn(
+ *                     destinationAlloydb,
+ *                     destinationAlloydbPrimary)
+ *                 .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> * * ## Import * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/inputs/ConnectionProfileMysqlArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/inputs/ConnectionProfileMysqlArgs.java index 6e294d499a..f9205fa8c4 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/inputs/ConnectionProfileMysqlArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/inputs/ConnectionProfileMysqlArgs.java @@ -5,7 +5,6 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; -import com.pulumi.exceptions.MissingRequiredPropertyException; import com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfileMysqlSslArgs; import java.lang.Boolean; import java.lang.Integer; @@ -35,37 +34,37 @@ public Optional> cloudSqlId() { } /** - * Required. The IP or hostname of the source MySQL database. + * The IP or hostname of the source MySQL database. * */ - @Import(name="host", required=true) - private Output host; + @Import(name="host") + private @Nullable Output host; /** - * @return Required. The IP or hostname of the source MySQL database. + * @return The IP or hostname of the source MySQL database. * */ - public Output host() { - return this.host; + public Optional> host() { + return Optional.ofNullable(this.host); } /** - * Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * */ - @Import(name="password", required=true) - private Output password; + @Import(name="password") + private @Nullable Output password; /** - * @return Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @return Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * */ - public Output password() { - return this.password; + public Optional> password() { + return Optional.ofNullable(this.password); } /** @@ -86,18 +85,18 @@ public Optional> passwordSet() { } /** - * Required. The network port of the source MySQL database. + * The network port of the source MySQL database. * */ - @Import(name="port", required=true) - private Output port; + @Import(name="port") + private @Nullable Output port; /** - * @return Required. The network port of the source MySQL database. + * @return The network port of the source MySQL database. * */ - public Output port() { - return this.port; + public Optional> port() { + return Optional.ofNullable(this.port); } /** @@ -118,18 +117,18 @@ public Optional> ssl() { } /** - * Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. 
* */ - @Import(name="username", required=true) - private Output username; + @Import(name="username") + private @Nullable Output username; /** - * @return Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * @return The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. * */ - public Output username() { - return this.username; + public Optional> username() { + return Optional.ofNullable(this.username); } private ConnectionProfileMysqlArgs() {} @@ -184,18 +183,18 @@ public Builder cloudSqlId(String cloudSqlId) { } /** - * @param host Required. The IP or hostname of the source MySQL database. + * @param host The IP or hostname of the source MySQL database. * * @return builder * */ - public Builder host(Output host) { + public Builder host(@Nullable Output host) { $.host = host; return this; } /** - * @param host Required. The IP or hostname of the source MySQL database. + * @param host The IP or hostname of the source MySQL database. * * @return builder * @@ -205,20 +204,20 @@ public Builder host(String host) { } /** - * @param password Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @param password Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * * @return builder * */ - public Builder password(Output password) { + public Builder password(@Nullable Output password) { $.password = password; return this; } /** - * @param password Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @param password Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * @@ -253,18 +252,18 @@ public Builder passwordSet(Boolean passwordSet) { } /** - * @param port Required. The network port of the source MySQL database. + * @param port The network port of the source MySQL database. * * @return builder * */ - public Builder port(Output port) { + public Builder port(@Nullable Output port) { $.port = port; return this; } /** - * @param port Required. The network port of the source MySQL database. + * @param port The network port of the source MySQL database. * * @return builder * @@ -297,18 +296,18 @@ public Builder ssl(ConnectionProfileMysqlSslArgs ssl) { } /** - * @param username Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * @param username The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. * * @return builder * */ - public Builder username(Output username) { + public Builder username(@Nullable Output username) { $.username = username; return this; } /** - * @param username Required. The username that Database Migration Service will use to connect to the database. 
The value is encrypted when stored in Database Migration Service. + * @param username The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. * * @return builder * @@ -318,18 +317,6 @@ public Builder username(String username) { } public ConnectionProfileMysqlArgs build() { - if ($.host == null) { - throw new MissingRequiredPropertyException("ConnectionProfileMysqlArgs", "host"); - } - if ($.password == null) { - throw new MissingRequiredPropertyException("ConnectionProfileMysqlArgs", "password"); - } - if ($.port == null) { - throw new MissingRequiredPropertyException("ConnectionProfileMysqlArgs", "port"); - } - if ($.username == null) { - throw new MissingRequiredPropertyException("ConnectionProfileMysqlArgs", "username"); - } return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/inputs/ConnectionProfilePostgresqlArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/inputs/ConnectionProfilePostgresqlArgs.java index c540bc5df8..2497a2aeb2 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/inputs/ConnectionProfilePostgresqlArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/inputs/ConnectionProfilePostgresqlArgs.java @@ -5,7 +5,6 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; -import com.pulumi.exceptions.MissingRequiredPropertyException; import com.pulumi.gcp.databasemigrationservice.inputs.ConnectionProfilePostgresqlSslArgs; import java.lang.Boolean; import java.lang.Integer; @@ -19,6 +18,21 @@ public final class ConnectionProfilePostgresqlArgs extends com.pulumi.resources. public static final ConnectionProfilePostgresqlArgs Empty = new ConnectionProfilePostgresqlArgs(); + /** + * If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + * + */ + @Import(name="alloydbClusterId") + private @Nullable Output alloydbClusterId; + + /** + * @return If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + * + */ + public Optional> alloydbClusterId() { + return Optional.ofNullable(this.alloydbClusterId); + } + /** * If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. * @@ -35,18 +49,18 @@ public Optional> cloudSqlId() { } /** - * Required. The IP or hostname of the source MySQL database. + * The IP or hostname of the source MySQL database. * */ - @Import(name="host", required=true) - private Output host; + @Import(name="host") + private @Nullable Output host; /** - * @return Required. The IP or hostname of the source MySQL database. + * @return The IP or hostname of the source MySQL database. * */ - public Output host() { - return this.host; + public Optional> host() { + return Optional.ofNullable(this.host); } /** @@ -67,22 +81,22 @@ public Optional> networkArchitecture() { } /** - * Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. 
* */ - @Import(name="password", required=true) - private Output password; + @Import(name="password") + private @Nullable Output password; /** - * @return Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @return Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * */ - public Output password() { - return this.password; + public Optional> password() { + return Optional.ofNullable(this.password); } /** @@ -103,18 +117,18 @@ public Optional> passwordSet() { } /** - * Required. The network port of the source MySQL database. + * The network port of the source MySQL database. * */ - @Import(name="port", required=true) - private Output port; + @Import(name="port") + private @Nullable Output port; /** - * @return Required. The network port of the source MySQL database. + * @return The network port of the source MySQL database. * */ - public Output port() { - return this.port; + public Optional> port() { + return Optional.ofNullable(this.port); } /** @@ -135,23 +149,24 @@ public Optional> ssl() { } /** - * Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. * */ - @Import(name="username", required=true) - private Output username; + @Import(name="username") + private @Nullable Output username; /** - * @return Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * @return The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. * */ - public Output username() { - return this.username; + public Optional> username() { + return Optional.ofNullable(this.username); } private ConnectionProfilePostgresqlArgs() {} private ConnectionProfilePostgresqlArgs(ConnectionProfilePostgresqlArgs $) { + this.alloydbClusterId = $.alloydbClusterId; this.cloudSqlId = $.cloudSqlId; this.host = $.host; this.networkArchitecture = $.networkArchitecture; @@ -180,6 +195,27 @@ public Builder(ConnectionProfilePostgresqlArgs defaults) { $ = new ConnectionProfilePostgresqlArgs(Objects.requireNonNull(defaults)); } + /** + * @param alloydbClusterId If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + * + * @return builder + * + */ + public Builder alloydbClusterId(@Nullable Output alloydbClusterId) { + $.alloydbClusterId = alloydbClusterId; + return this; + } + + /** + * @param alloydbClusterId If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + * + * @return builder + * + */ + public Builder alloydbClusterId(String alloydbClusterId) { + return alloydbClusterId(Output.of(alloydbClusterId)); + } + /** * @param cloudSqlId If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. * @@ -202,18 +238,18 @@ public Builder cloudSqlId(String cloudSqlId) { } /** - * @param host Required. 
The IP or hostname of the source MySQL database. + * @param host The IP or hostname of the source MySQL database. * * @return builder * */ - public Builder host(Output host) { + public Builder host(@Nullable Output host) { $.host = host; return this; } /** - * @param host Required. The IP or hostname of the source MySQL database. + * @param host The IP or hostname of the source MySQL database. * * @return builder * @@ -246,20 +282,20 @@ public Builder networkArchitecture(String networkArchitecture) { } /** - * @param password Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @param password Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * * @return builder * */ - public Builder password(Output password) { + public Builder password(@Nullable Output password) { $.password = password; return this; } /** - * @param password Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @param password Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * @@ -294,18 +330,18 @@ public Builder passwordSet(Boolean passwordSet) { } /** - * @param port Required. The network port of the source MySQL database. + * @param port The network port of the source MySQL database. * * @return builder * */ - public Builder port(Output port) { + public Builder port(@Nullable Output port) { $.port = port; return this; } /** - * @param port Required. The network port of the source MySQL database. + * @param port The network port of the source MySQL database. * * @return builder * @@ -338,18 +374,18 @@ public Builder ssl(ConnectionProfilePostgresqlSslArgs ssl) { } /** - * @param username Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * @param username The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. * * @return builder * */ - public Builder username(Output username) { + public Builder username(@Nullable Output username) { $.username = username; return this; } /** - * @param username Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * @param username The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. 
* * @return builder * @@ -359,18 +395,6 @@ public Builder username(String username) { } public ConnectionProfilePostgresqlArgs build() { - if ($.host == null) { - throw new MissingRequiredPropertyException("ConnectionProfilePostgresqlArgs", "host"); - } - if ($.password == null) { - throw new MissingRequiredPropertyException("ConnectionProfilePostgresqlArgs", "password"); - } - if ($.port == null) { - throw new MissingRequiredPropertyException("ConnectionProfilePostgresqlArgs", "port"); - } - if ($.username == null) { - throw new MissingRequiredPropertyException("ConnectionProfilePostgresqlArgs", "username"); - } return $; } } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/outputs/ConnectionProfileMysql.java b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/outputs/ConnectionProfileMysql.java index 508e013ff1..4eb2fb5edd 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/outputs/ConnectionProfileMysql.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/outputs/ConnectionProfileMysql.java @@ -4,7 +4,6 @@ package com.pulumi.gcp.databasemigrationservice.outputs; import com.pulumi.core.annotations.CustomType; -import com.pulumi.exceptions.MissingRequiredPropertyException; import com.pulumi.gcp.databasemigrationservice.outputs.ConnectionProfileMysqlSsl; import java.lang.Boolean; import java.lang.Integer; @@ -21,17 +20,17 @@ public final class ConnectionProfileMysql { */ private @Nullable String cloudSqlId; /** - * @return Required. The IP or hostname of the source MySQL database. + * @return The IP or hostname of the source MySQL database. * */ - private String host; + private @Nullable String host; /** - * @return Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @return Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * */ - private String password; + private @Nullable String password; /** * @return (Output) * Output only. Indicates If this connection profile password is stored. @@ -39,10 +38,10 @@ public final class ConnectionProfileMysql { */ private @Nullable Boolean passwordSet; /** - * @return Required. The network port of the source MySQL database. + * @return The network port of the source MySQL database. * */ - private Integer port; + private @Nullable Integer port; /** * @return SSL configuration for the destination to connect to the source database. * Structure is documented below. @@ -50,10 +49,10 @@ public final class ConnectionProfileMysql { */ private @Nullable ConnectionProfileMysqlSsl ssl; /** - * @return Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * @return The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. * */ - private String username; + private @Nullable String username; private ConnectionProfileMysql() {} /** @@ -64,20 +63,20 @@ public Optional cloudSqlId() { return Optional.ofNullable(this.cloudSqlId); } /** - * @return Required. The IP or hostname of the source MySQL database. + * @return The IP or hostname of the source MySQL database. 
* */ - public String host() { - return this.host; + public Optional host() { + return Optional.ofNullable(this.host); } /** - * @return Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @return Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * */ - public String password() { - return this.password; + public Optional password() { + return Optional.ofNullable(this.password); } /** * @return (Output) @@ -88,11 +87,11 @@ public Optional passwordSet() { return Optional.ofNullable(this.passwordSet); } /** - * @return Required. The network port of the source MySQL database. + * @return The network port of the source MySQL database. * */ - public Integer port() { - return this.port; + public Optional port() { + return Optional.ofNullable(this.port); } /** * @return SSL configuration for the destination to connect to the source database. @@ -103,11 +102,11 @@ public Optional ssl() { return Optional.ofNullable(this.ssl); } /** - * @return Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * @return The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. * */ - public String username() { - return this.username; + public Optional username() { + return Optional.ofNullable(this.username); } public static Builder builder() { @@ -120,12 +119,12 @@ public static Builder builder(ConnectionProfileMysql defaults) { @CustomType.Builder public static final class Builder { private @Nullable String cloudSqlId; - private String host; - private String password; + private @Nullable String host; + private @Nullable String password; private @Nullable Boolean passwordSet; - private Integer port; + private @Nullable Integer port; private @Nullable ConnectionProfileMysqlSsl ssl; - private String username; + private @Nullable String username; public Builder() {} public Builder(ConnectionProfileMysql defaults) { Objects.requireNonNull(defaults); @@ -145,18 +144,14 @@ public Builder cloudSqlId(@Nullable String cloudSqlId) { return this; } @CustomType.Setter - public Builder host(String host) { - if (host == null) { - throw new MissingRequiredPropertyException("ConnectionProfileMysql", "host"); - } + public Builder host(@Nullable String host) { + this.host = host; return this; } @CustomType.Setter - public Builder password(String password) { - if (password == null) { - throw new MissingRequiredPropertyException("ConnectionProfileMysql", "password"); - } + public Builder password(@Nullable String password) { + this.password = password; return this; } @@ -167,10 +162,8 @@ public Builder passwordSet(@Nullable Boolean passwordSet) { return this; } @CustomType.Setter - public Builder port(Integer port) { - if (port == null) { - throw new MissingRequiredPropertyException("ConnectionProfileMysql", "port"); - } + public Builder port(@Nullable Integer port) { + this.port = port; return this; } @@ -181,10 +174,8 @@ public Builder ssl(@Nullable ConnectionProfileMysqlSsl ssl) { return this; } @CustomType.Setter - public Builder username(String username) { - if (username == null) { - throw new 
MissingRequiredPropertyException("ConnectionProfileMysql", "username"); - } + public Builder username(@Nullable String username) { + this.username = username; return this; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/outputs/ConnectionProfilePostgresql.java b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/outputs/ConnectionProfilePostgresql.java index 220d7d634e..785e5f8345 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/outputs/ConnectionProfilePostgresql.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/databasemigrationservice/outputs/ConnectionProfilePostgresql.java @@ -4,7 +4,6 @@ package com.pulumi.gcp.databasemigrationservice.outputs; import com.pulumi.core.annotations.CustomType; -import com.pulumi.exceptions.MissingRequiredPropertyException; import com.pulumi.gcp.databasemigrationservice.outputs.ConnectionProfilePostgresqlSsl; import java.lang.Boolean; import java.lang.Integer; @@ -15,16 +14,21 @@ @CustomType public final class ConnectionProfilePostgresql { + /** + * @return If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + * + */ + private @Nullable String alloydbClusterId; /** * @return If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. * */ private @Nullable String cloudSqlId; /** - * @return Required. The IP or hostname of the source MySQL database. + * @return The IP or hostname of the source MySQL database. * */ - private String host; + private @Nullable String host; /** * @return (Output) * Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. @@ -32,12 +36,12 @@ public final class ConnectionProfilePostgresql { */ private @Nullable String networkArchitecture; /** - * @return Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @return Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * */ - private String password; + private @Nullable String password; /** * @return (Output) * Output only. Indicates If this connection profile password is stored. @@ -45,10 +49,10 @@ public final class ConnectionProfilePostgresql { */ private @Nullable Boolean passwordSet; /** - * @return Required. The network port of the source MySQL database. + * @return The network port of the source MySQL database. * */ - private Integer port; + private @Nullable Integer port; /** * @return SSL configuration for the destination to connect to the source database. * Structure is documented below. @@ -56,12 +60,19 @@ public final class ConnectionProfilePostgresql { */ private @Nullable ConnectionProfilePostgresqlSsl ssl; /** - * @return Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * @return The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. 
* */ - private String username; + private @Nullable String username; private ConnectionProfilePostgresql() {} + /** + * @return If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + * + */ + public Optional alloydbClusterId() { + return Optional.ofNullable(this.alloydbClusterId); + } /** * @return If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. * @@ -70,11 +81,11 @@ public Optional cloudSqlId() { return Optional.ofNullable(this.cloudSqlId); } /** - * @return Required. The IP or hostname of the source MySQL database. + * @return The IP or hostname of the source MySQL database. * */ - public String host() { - return this.host; + public Optional host() { + return Optional.ofNullable(this.host); } /** * @return (Output) @@ -85,13 +96,13 @@ public Optional networkArchitecture() { return Optional.ofNullable(this.networkArchitecture); } /** - * @return Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * @return Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. * */ - public String password() { - return this.password; + public Optional password() { + return Optional.ofNullable(this.password); } /** * @return (Output) @@ -102,11 +113,11 @@ public Optional passwordSet() { return Optional.ofNullable(this.passwordSet); } /** - * @return Required. The network port of the source MySQL database. + * @return The network port of the source MySQL database. * */ - public Integer port() { - return this.port; + public Optional port() { + return Optional.ofNullable(this.port); } /** * @return SSL configuration for the destination to connect to the source database. @@ -117,11 +128,11 @@ public Optional ssl() { return Optional.ofNullable(this.ssl); } /** - * @return Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * @return The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. 
* */ - public String username() { - return this.username; + public Optional username() { + return Optional.ofNullable(this.username); } public static Builder builder() { @@ -133,17 +144,19 @@ public static Builder builder(ConnectionProfilePostgresql defaults) { } @CustomType.Builder public static final class Builder { + private @Nullable String alloydbClusterId; private @Nullable String cloudSqlId; - private String host; + private @Nullable String host; private @Nullable String networkArchitecture; - private String password; + private @Nullable String password; private @Nullable Boolean passwordSet; - private Integer port; + private @Nullable Integer port; private @Nullable ConnectionProfilePostgresqlSsl ssl; - private String username; + private @Nullable String username; public Builder() {} public Builder(ConnectionProfilePostgresql defaults) { Objects.requireNonNull(defaults); + this.alloydbClusterId = defaults.alloydbClusterId; this.cloudSqlId = defaults.cloudSqlId; this.host = defaults.host; this.networkArchitecture = defaults.networkArchitecture; @@ -154,6 +167,12 @@ public Builder(ConnectionProfilePostgresql defaults) { this.username = defaults.username; } + @CustomType.Setter + public Builder alloydbClusterId(@Nullable String alloydbClusterId) { + + this.alloydbClusterId = alloydbClusterId; + return this; + } @CustomType.Setter public Builder cloudSqlId(@Nullable String cloudSqlId) { @@ -161,10 +180,8 @@ public Builder cloudSqlId(@Nullable String cloudSqlId) { return this; } @CustomType.Setter - public Builder host(String host) { - if (host == null) { - throw new MissingRequiredPropertyException("ConnectionProfilePostgresql", "host"); - } + public Builder host(@Nullable String host) { + this.host = host; return this; } @@ -175,10 +192,8 @@ public Builder networkArchitecture(@Nullable String networkArchitecture) { return this; } @CustomType.Setter - public Builder password(String password) { - if (password == null) { - throw new MissingRequiredPropertyException("ConnectionProfilePostgresql", "password"); - } + public Builder password(@Nullable String password) { + this.password = password; return this; } @@ -189,10 +204,8 @@ public Builder passwordSet(@Nullable Boolean passwordSet) { return this; } @CustomType.Setter - public Builder port(Integer port) { - if (port == null) { - throw new MissingRequiredPropertyException("ConnectionProfilePostgresql", "port"); - } + public Builder port(@Nullable Integer port) { + this.port = port; return this; } @@ -203,15 +216,14 @@ public Builder ssl(@Nullable ConnectionProfilePostgresqlSsl ssl) { return this; } @CustomType.Setter - public Builder username(String username) { - if (username == null) { - throw new MissingRequiredPropertyException("ConnectionProfilePostgresql", "username"); - } + public Builder username(@Nullable String username) { + this.username = username; return this; } public ConnectionProfilePostgresql build() { final var _resultValue = new ConnectionProfilePostgresql(); + _resultValue.alloydbClusterId = alloydbClusterId; _resultValue.cloudSqlId = cloudSqlId; _resultValue.host = host; _resultValue.networkArchitecture = networkArchitecture; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionArgs.java index 5b8ea12a70..487cc95c37 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionArgs.java +++ 
b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionArgs.java @@ -7,6 +7,7 @@ import com.pulumi.core.annotations.Import; import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionExportDataArgs; import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionPubSubNotificationArgs; +import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionTagResourcesArgs; import java.util.Objects; import java.util.Optional; import javax.annotation.Nullable; @@ -50,11 +51,29 @@ public Optional> p return Optional.ofNullable(this.pubSubNotification); } + /** + * Publish a message into the Pub/Sub topic. + * Structure is documented below. + * + */ + @Import(name="tagResources") + private @Nullable Output tagResources; + + /** + * @return Publish a message into the Pub/Sub topic. + * Structure is documented below. + * + */ + public Optional> tagResources() { + return Optional.ofNullable(this.tagResources); + } + private PreventionDiscoveryConfigActionArgs() {} private PreventionDiscoveryConfigActionArgs(PreventionDiscoveryConfigActionArgs $) { this.exportData = $.exportData; this.pubSubNotification = $.pubSubNotification; + this.tagResources = $.tagResources; } public static Builder builder() { @@ -121,6 +140,29 @@ public Builder pubSubNotification(PreventionDiscoveryConfigActionPubSubNotificat return pubSubNotification(Output.of(pubSubNotification)); } + /** + * @param tagResources Publish a message into the Pub/Sub topic. + * Structure is documented below. + * + * @return builder + * + */ + public Builder tagResources(@Nullable Output tagResources) { + $.tagResources = tagResources; + return this; + } + + /** + * @param tagResources Publish a message into the Pub/Sub topic. + * Structure is documented below. + * + * @return builder + * + */ + public Builder tagResources(PreventionDiscoveryConfigActionTagResourcesArgs tagResources) { + return tagResources(Output.of(tagResources)); + } + public PreventionDiscoveryConfigActionArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesArgs.java new file mode 100644 index 0000000000..29d7849032 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesArgs.java @@ -0,0 +1,190 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionTagResourcesTagConditionArgs; +import java.lang.Boolean; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class PreventionDiscoveryConfigActionTagResourcesArgs extends com.pulumi.resources.ResourceArgs { + + public static final PreventionDiscoveryConfigActionTagResourcesArgs Empty = new PreventionDiscoveryConfigActionTagResourcesArgs(); + + /** + * Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. 
This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + * + */ + @Import(name="lowerDataRiskToLow") + private @Nullable Output lowerDataRiskToLow; + + /** + * @return Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + * + */ + public Optional> lowerDataRiskToLow() { + return Optional.ofNullable(this.lowerDataRiskToLow); + } + + /** + * The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + * + */ + @Import(name="profileGenerationsToTags") + private @Nullable Output> profileGenerationsToTags; + + /** + * @return The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + * + */ + public Optional>> profileGenerationsToTags() { + return Optional.ofNullable(this.profileGenerationsToTags); + } + + /** + * The tags to associate with different conditions. + * Structure is documented below. + * + */ + @Import(name="tagConditions") + private @Nullable Output> tagConditions; + + /** + * @return The tags to associate with different conditions. + * Structure is documented below. 
+ * + */ + public Optional>> tagConditions() { + return Optional.ofNullable(this.tagConditions); + } + + private PreventionDiscoveryConfigActionTagResourcesArgs() {} + + private PreventionDiscoveryConfigActionTagResourcesArgs(PreventionDiscoveryConfigActionTagResourcesArgs $) { + this.lowerDataRiskToLow = $.lowerDataRiskToLow; + this.profileGenerationsToTags = $.profileGenerationsToTags; + this.tagConditions = $.tagConditions; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(PreventionDiscoveryConfigActionTagResourcesArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private PreventionDiscoveryConfigActionTagResourcesArgs $; + + public Builder() { + $ = new PreventionDiscoveryConfigActionTagResourcesArgs(); + } + + public Builder(PreventionDiscoveryConfigActionTagResourcesArgs defaults) { + $ = new PreventionDiscoveryConfigActionTagResourcesArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param lowerDataRiskToLow Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + * + * @return builder + * + */ + public Builder lowerDataRiskToLow(@Nullable Output lowerDataRiskToLow) { + $.lowerDataRiskToLow = lowerDataRiskToLow; + return this; + } + + /** + * @param lowerDataRiskToLow Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + * + * @return builder + * + */ + public Builder lowerDataRiskToLow(Boolean lowerDataRiskToLow) { + return lowerDataRiskToLow(Output.of(lowerDataRiskToLow)); + } + + /** + * @param profileGenerationsToTags The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + * + * @return builder + * + */ + public Builder profileGenerationsToTags(@Nullable Output> profileGenerationsToTags) { + $.profileGenerationsToTags = profileGenerationsToTags; + return this; + } + + /** + * @param profileGenerationsToTags The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. 
To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + * + * @return builder + * + */ + public Builder profileGenerationsToTags(List profileGenerationsToTags) { + return profileGenerationsToTags(Output.of(profileGenerationsToTags)); + } + + /** + * @param profileGenerationsToTags The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + * + * @return builder + * + */ + public Builder profileGenerationsToTags(String... profileGenerationsToTags) { + return profileGenerationsToTags(List.of(profileGenerationsToTags)); + } + + /** + * @param tagConditions The tags to associate with different conditions. + * Structure is documented below. + * + * @return builder + * + */ + public Builder tagConditions(@Nullable Output> tagConditions) { + $.tagConditions = tagConditions; + return this; + } + + /** + * @param tagConditions The tags to associate with different conditions. + * Structure is documented below. + * + * @return builder + * + */ + public Builder tagConditions(List tagConditions) { + return tagConditions(Output.of(tagConditions)); + } + + /** + * @param tagConditions The tags to associate with different conditions. + * Structure is documented below. + * + * @return builder + * + */ + public Builder tagConditions(PreventionDiscoveryConfigActionTagResourcesTagConditionArgs... tagConditions) { + return tagConditions(List.of(tagConditions)); + } + + public PreventionDiscoveryConfigActionTagResourcesArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionArgs.java new file mode 100644 index 0000000000..482de1d980 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionArgs.java @@ -0,0 +1,129 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs; +import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class PreventionDiscoveryConfigActionTagResourcesTagConditionArgs extends com.pulumi.resources.ResourceArgs { + + public static final PreventionDiscoveryConfigActionTagResourcesTagConditionArgs Empty = new PreventionDiscoveryConfigActionTagResourcesTagConditionArgs(); + + /** + * Conditions attaching the tag to a resource on its profile having this sensitivity score. + * Structure is documented below. 
+ * + */ + @Import(name="sensitivityScore") + private @Nullable Output sensitivityScore; + + /** + * @return Conditions attaching the tag to a resource on its profile having this sensitivity score. + * Structure is documented below. + * + */ + public Optional> sensitivityScore() { + return Optional.ofNullable(this.sensitivityScore); + } + + /** + * The tag value to attach to resources. + * Structure is documented below. + * + */ + @Import(name="tag") + private @Nullable Output tag; + + /** + * @return The tag value to attach to resources. + * Structure is documented below. + * + */ + public Optional> tag() { + return Optional.ofNullable(this.tag); + } + + private PreventionDiscoveryConfigActionTagResourcesTagConditionArgs() {} + + private PreventionDiscoveryConfigActionTagResourcesTagConditionArgs(PreventionDiscoveryConfigActionTagResourcesTagConditionArgs $) { + this.sensitivityScore = $.sensitivityScore; + this.tag = $.tag; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(PreventionDiscoveryConfigActionTagResourcesTagConditionArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private PreventionDiscoveryConfigActionTagResourcesTagConditionArgs $; + + public Builder() { + $ = new PreventionDiscoveryConfigActionTagResourcesTagConditionArgs(); + } + + public Builder(PreventionDiscoveryConfigActionTagResourcesTagConditionArgs defaults) { + $ = new PreventionDiscoveryConfigActionTagResourcesTagConditionArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param sensitivityScore Conditions attaching the tag to a resource on its profile having this sensitivity score. + * Structure is documented below. + * + * @return builder + * + */ + public Builder sensitivityScore(@Nullable Output sensitivityScore) { + $.sensitivityScore = sensitivityScore; + return this; + } + + /** + * @param sensitivityScore Conditions attaching the tag to a resource on its profile having this sensitivity score. + * Structure is documented below. + * + * @return builder + * + */ + public Builder sensitivityScore(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs sensitivityScore) { + return sensitivityScore(Output.of(sensitivityScore)); + } + + /** + * @param tag The tag value to attach to resources. + * Structure is documented below. + * + * @return builder + * + */ + public Builder tag(@Nullable Output tag) { + $.tag = tag; + return this; + } + + /** + * @param tag The tag value to attach to resources. + * Structure is documented below. + * + * @return builder + * + */ + public Builder tag(PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs tag) { + return tag(Output.of(tag)); + } + + public PreventionDiscoveryConfigActionTagResourcesTagConditionArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs.java new file mode 100644 index 0000000000..6e2ef2354d --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs.java @@ -0,0 +1,89 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.dataloss.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs extends com.pulumi.resources.ResourceArgs { + + public static final PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs Empty = new PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs(); + + /** + * The sensitivity score applied to the resource. + * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + * + */ + @Import(name="score", required=true) + private Output score; + + /** + * @return The sensitivity score applied to the resource. + * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + * + */ + public Output score() { + return this.score; + } + + private PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs() {} + + private PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs $) { + this.score = $.score; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs $; + + public Builder() { + $ = new PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs(); + } + + public Builder(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs defaults) { + $ = new PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param score The sensitivity score applied to the resource. + * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + * + * @return builder + * + */ + public Builder score(Output score) { + $.score = score; + return this; + } + + /** + * @param score The sensitivity score applied to the resource. + * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + * + * @return builder + * + */ + public Builder score(String score) { + return score(Output.of(score)); + } + + public PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs build() { + if ($.score == null) { + throw new MissingRequiredPropertyException("PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs", "score"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs.java new file mode 100644 index 0000000000..80ff455ba9 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs.java @@ -0,0 +1,83 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.dataloss.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs extends com.pulumi.resources.ResourceArgs { + + public static final PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs Empty = new PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs(); + + /** + * The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + * + */ + @Import(name="namespacedValue") + private @Nullable Output namespacedValue; + + /** + * @return The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + * + */ + public Optional> namespacedValue() { + return Optional.ofNullable(this.namespacedValue); + } + + private PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs() {} + + private PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs(PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs $) { + this.namespacedValue = $.namespacedValue; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs $; + + public Builder() { + $ = new PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs(); + } + + public Builder(PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs defaults) { + $ = new PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param namespacedValue The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + * + * @return builder + * + */ + public Builder namespacedValue(@Nullable Output namespacedValue) { + $.namespacedValue = namespacedValue; + return this; + } + + /** + * @param namespacedValue The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". 
+ * + * @return builder + * + */ + public Builder namespacedValue(String namespacedValue) { + return namespacedValue(Output.of(namespacedValue)); + } + + public PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs.java index ba365317e1..0a63b82ba7 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs; import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgs; import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadenceArgs; import java.util.Objects; @@ -16,6 +17,23 @@ public final class PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs exte public static final PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs Empty = new PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs(); + /** + * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + * + */ + @Import(name="inspectTemplateModifiedCadence") + private @Nullable Output inspectTemplateModifiedCadence; + + /** + * @return Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + * + */ + public Optional> inspectTemplateModifiedCadence() { + return Optional.ofNullable(this.inspectTemplateModifiedCadence); + } + /** * Governs when to update data profiles when a schema is modified * Structure is documented below. @@ -53,6 +71,7 @@ public Optional inspectTemplateModifiedCadence) { + $.inspectTemplateModifiedCadence = inspectTemplateModifiedCadence; + return this; + } + + /** + * @param inspectTemplateModifiedCadence Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + * + * @return builder + * + */ + public Builder inspectTemplateModifiedCadence(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs inspectTemplateModifiedCadence) { + return inspectTemplateModifiedCadence(Output.of(inspectTemplateModifiedCadence)); + } + /** * @param schemaModifiedCadence Governs when to update data profiles when a schema is modified * Structure is documented below. 
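The hunks above introduce the new DLP discovery-config inputs for the `tagResources` action and wire an `inspectTemplateModifiedCadence` into the BigQuery target cadence builder. As a minimal sketch that is not part of the generated patch, the new builders could be composed roughly as follows; the wrapper class name `DiscoveryConfigSketch`, the `main` method, the chosen enum values, and the sample tag value are illustrative assumptions, while the builder classes and methods themselves are the ones added in this patch (the cadence args class appears in the file immediately below). Attaching these args to a discovery-config resource follows the usual Pulumi resource pattern and is omitted here.

import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionTagResourcesArgs;
import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionTagResourcesTagConditionArgs;
import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs;
import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs;
import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs;
import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs;

public final class DiscoveryConfigSketch {
    public static void main(String[] args) {
        // Tag high-sensitivity profiles (both new and updated generations) with a
        // namespaced tag value, using the varargs builder overloads generated above.
        var tagResources = PreventionDiscoveryConfigActionTagResourcesArgs.builder()
            .lowerDataRiskToLow(true)
            .profileGenerationsToTags("PROFILE_GENERATION_NEW", "PROFILE_GENERATION_UPDATE")
            .tagConditions(PreventionDiscoveryConfigActionTagResourcesTagConditionArgs.builder()
                .sensitivityScore(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs.builder()
                    .score("SENSITIVITY_HIGH")
                    .build())
                .tag(PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs.builder()
                    // Placeholder {parent_id}/{tag_key_short_name}/{short_name} value.
                    .namespacedValue("123456/environment/prod")
                    .build())
                .build())
            .build();

        // Re-profile BigQuery targets monthly whenever the inspect template changes;
        // the InspectTemplateModifiedCadence args class is added in the next file.
        var cadence = PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs.builder()
            .inspectTemplateModifiedCadence(
                PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs.builder()
                    .frequency("UPDATE_FREQUENCY_MONTHLY")
                    .build())
            .build();
    }
}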
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs.java new file mode 100644 index 0000000000..4006d8ad8b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs.java @@ -0,0 +1,87 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs extends com.pulumi.resources.ResourceArgs { + + public static final PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs Empty = new PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs(); + + /** + * How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + */ + @Import(name="frequency") + private @Nullable Output frequency; + + /** + * @return How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + */ + public Optional> frequency() { + return Optional.ofNullable(this.frequency); + } + + private PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs() {} + + private PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs $) { + this.frequency = $.frequency; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs $; + + public Builder() { + $ = new PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs(); + } + + public Builder(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs defaults) { + $ = new PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param frequency How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + * @return builder + * + */ + public Builder frequency(@Nullable Output frequency) { + $.frequency = frequency; + return this; + } + + /** + * @param frequency How frequently data profiles can be updated when the template is modified. Defaults to never. 
+ * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + * @return builder + * + */ + public Builder frequency(String frequency) { + return frequency(Output.of(frequency)); + } + + public PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs.java index 89ea910071..a5dc932e6f 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs.java @@ -5,6 +5,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs; import com.pulumi.gcp.dataloss.inputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceArgs; import java.lang.String; import java.util.Objects; @@ -16,6 +17,23 @@ public final class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenc public static final PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs Empty = new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs(); + /** + * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + * + */ + @Import(name="inspectTemplateModifiedCadence") + private @Nullable Output inspectTemplateModifiedCadence; + + /** + * @return Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + * + */ + public Optional> inspectTemplateModifiedCadence() { + return Optional.ofNullable(this.inspectTemplateModifiedCadence); + } + /** * Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. @@ -53,6 +71,7 @@ public Optional inspectTemplateModifiedCadence) { + $.inspectTemplateModifiedCadence = inspectTemplateModifiedCadence; + return this; + } + + /** + * @param inspectTemplateModifiedCadence Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + * + * @return builder + * + */ + public Builder inspectTemplateModifiedCadence(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs inspectTemplateModifiedCadence) { + return inspectTemplateModifiedCadence(Output.of(inspectTemplateModifiedCadence)); + } + /** * @param refreshFrequency Data changes in Cloud Storage can't trigger reprofiling. 
If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs.java new file mode 100644 index 0000000000..58c085f705 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/inputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs.java @@ -0,0 +1,89 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + + +public final class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs extends com.pulumi.resources.ResourceArgs { + + public static final PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs Empty = new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs(); + + /** + * How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + */ + @Import(name="frequency", required=true) + private Output frequency; + + /** + * @return How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + */ + public Output frequency() { + return this.frequency; + } + + private PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs() {} + + private PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs $) { + this.frequency = $.frequency; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs $; + + public Builder() { + $ = new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs(); + } + + public Builder(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs defaults) { + $ = new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param frequency How frequently data profiles can be updated when the template is modified. Defaults to never. 
+ * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + * @return builder + * + */ + public Builder frequency(Output frequency) { + $.frequency = frequency; + return this; + } + + /** + * @param frequency How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + * @return builder + * + */ + public Builder frequency(String frequency) { + return frequency(Output.of(frequency)); + } + + public PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs build() { + if ($.frequency == null) { + throw new MissingRequiredPropertyException("PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs", "frequency"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigAction.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigAction.java index bd2828e059..d105593fc7 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigAction.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigAction.java @@ -6,6 +6,7 @@ import com.pulumi.core.annotations.CustomType; import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigActionExportData; import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigActionPubSubNotification; +import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigActionTagResources; import java.util.Objects; import java.util.Optional; import javax.annotation.Nullable; @@ -24,6 +25,12 @@ public final class PreventionDiscoveryConfigAction { * */ private @Nullable PreventionDiscoveryConfigActionPubSubNotification pubSubNotification; + /** + * @return Publish a message into the Pub/Sub topic. + * Structure is documented below. + * + */ + private @Nullable PreventionDiscoveryConfigActionTagResources tagResources; private PreventionDiscoveryConfigAction() {} /** @@ -42,6 +49,14 @@ public Optional exportData() { public Optional pubSubNotification() { return Optional.ofNullable(this.pubSubNotification); } + /** + * @return Publish a message into the Pub/Sub topic. + * Structure is documented below. 
+ * + */ + public Optional tagResources() { + return Optional.ofNullable(this.tagResources); + } public static Builder builder() { return new Builder(); @@ -54,11 +69,13 @@ public static Builder builder(PreventionDiscoveryConfigAction defaults) { public static final class Builder { private @Nullable PreventionDiscoveryConfigActionExportData exportData; private @Nullable PreventionDiscoveryConfigActionPubSubNotification pubSubNotification; + private @Nullable PreventionDiscoveryConfigActionTagResources tagResources; public Builder() {} public Builder(PreventionDiscoveryConfigAction defaults) { Objects.requireNonNull(defaults); this.exportData = defaults.exportData; this.pubSubNotification = defaults.pubSubNotification; + this.tagResources = defaults.tagResources; } @CustomType.Setter @@ -73,10 +90,17 @@ public Builder pubSubNotification(@Nullable PreventionDiscoveryConfigActionPubSu this.pubSubNotification = pubSubNotification; return this; } + @CustomType.Setter + public Builder tagResources(@Nullable PreventionDiscoveryConfigActionTagResources tagResources) { + + this.tagResources = tagResources; + return this; + } public PreventionDiscoveryConfigAction build() { final var _resultValue = new PreventionDiscoveryConfigAction(); _resultValue.exportData = exportData; _resultValue.pubSubNotification = pubSubNotification; + _resultValue.tagResources = tagResources; return _resultValue; } } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResources.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResources.java new file mode 100644 index 0000000000..88b7cd9b79 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResources.java @@ -0,0 +1,112 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigActionTagResourcesTagCondition; +import java.lang.Boolean; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class PreventionDiscoveryConfigActionTagResources { + /** + * @return Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + * + */ + private @Nullable Boolean lowerDataRiskToLow; + /** + * @return The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. 
+ * + */ + private @Nullable List profileGenerationsToTags; + /** + * @return The tags to associate with different conditions. + * Structure is documented below. + * + */ + private @Nullable List tagConditions; + + private PreventionDiscoveryConfigActionTagResources() {} + /** + * @return Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + * + */ + public Optional lowerDataRiskToLow() { + return Optional.ofNullable(this.lowerDataRiskToLow); + } + /** + * @return The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + * + */ + public List profileGenerationsToTags() { + return this.profileGenerationsToTags == null ? List.of() : this.profileGenerationsToTags; + } + /** + * @return The tags to associate with different conditions. + * Structure is documented below. + * + */ + public List tagConditions() { + return this.tagConditions == null ? List.of() : this.tagConditions; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(PreventionDiscoveryConfigActionTagResources defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable Boolean lowerDataRiskToLow; + private @Nullable List profileGenerationsToTags; + private @Nullable List tagConditions; + public Builder() {} + public Builder(PreventionDiscoveryConfigActionTagResources defaults) { + Objects.requireNonNull(defaults); + this.lowerDataRiskToLow = defaults.lowerDataRiskToLow; + this.profileGenerationsToTags = defaults.profileGenerationsToTags; + this.tagConditions = defaults.tagConditions; + } + + @CustomType.Setter + public Builder lowerDataRiskToLow(@Nullable Boolean lowerDataRiskToLow) { + + this.lowerDataRiskToLow = lowerDataRiskToLow; + return this; + } + @CustomType.Setter + public Builder profileGenerationsToTags(@Nullable List profileGenerationsToTags) { + + this.profileGenerationsToTags = profileGenerationsToTags; + return this; + } + public Builder profileGenerationsToTags(String... profileGenerationsToTags) { + return profileGenerationsToTags(List.of(profileGenerationsToTags)); + } + @CustomType.Setter + public Builder tagConditions(@Nullable List tagConditions) { + + this.tagConditions = tagConditions; + return this; + } + public Builder tagConditions(PreventionDiscoveryConfigActionTagResourcesTagCondition... 
tagConditions) { + return tagConditions(List.of(tagConditions)); + } + public PreventionDiscoveryConfigActionTagResources build() { + final var _resultValue = new PreventionDiscoveryConfigActionTagResources(); + _resultValue.lowerDataRiskToLow = lowerDataRiskToLow; + _resultValue.profileGenerationsToTags = profileGenerationsToTags; + _resultValue.tagConditions = tagConditions; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagCondition.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagCondition.java new file mode 100644 index 0000000000..da1ad37161 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagCondition.java @@ -0,0 +1,83 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore; +import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionTag; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class PreventionDiscoveryConfigActionTagResourcesTagCondition { + /** + * @return Conditions attaching the tag to a resource on its profile having this sensitivity score. + * Structure is documented below. + * + */ + private @Nullable PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore sensitivityScore; + /** + * @return The tag value to attach to resources. + * Structure is documented below. + * + */ + private @Nullable PreventionDiscoveryConfigActionTagResourcesTagConditionTag tag; + + private PreventionDiscoveryConfigActionTagResourcesTagCondition() {} + /** + * @return Conditions attaching the tag to a resource on its profile having this sensitivity score. + * Structure is documented below. + * + */ + public Optional sensitivityScore() { + return Optional.ofNullable(this.sensitivityScore); + } + /** + * @return The tag value to attach to resources. + * Structure is documented below. 
+ * + */ + public Optional tag() { + return Optional.ofNullable(this.tag); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(PreventionDiscoveryConfigActionTagResourcesTagCondition defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore sensitivityScore; + private @Nullable PreventionDiscoveryConfigActionTagResourcesTagConditionTag tag; + public Builder() {} + public Builder(PreventionDiscoveryConfigActionTagResourcesTagCondition defaults) { + Objects.requireNonNull(defaults); + this.sensitivityScore = defaults.sensitivityScore; + this.tag = defaults.tag; + } + + @CustomType.Setter + public Builder sensitivityScore(@Nullable PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore sensitivityScore) { + + this.sensitivityScore = sensitivityScore; + return this; + } + @CustomType.Setter + public Builder tag(@Nullable PreventionDiscoveryConfigActionTagResourcesTagConditionTag tag) { + + this.tag = tag; + return this; + } + public PreventionDiscoveryConfigActionTagResourcesTagCondition build() { + final var _resultValue = new PreventionDiscoveryConfigActionTagResourcesTagCondition(); + _resultValue.sensitivityScore = sensitivityScore; + _resultValue.tag = tag; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore.java new file mode 100644 index 0000000000..2a48b3f0c8 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore.java @@ -0,0 +1,60 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore { + /** + * @return The sensitivity score applied to the resource. + * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + * + */ + private String score; + + private PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore() {} + /** + * @return The sensitivity score applied to the resource. + * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. 
+ * + */ + public String score() { + return this.score; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String score; + public Builder() {} + public Builder(PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore defaults) { + Objects.requireNonNull(defaults); + this.score = defaults.score; + } + + @CustomType.Setter + public Builder score(String score) { + if (score == null) { + throw new MissingRequiredPropertyException("PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore", "score"); + } + this.score = score; + return this; + } + public PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore build() { + final var _resultValue = new PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore(); + _resultValue.score = score; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTag.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTag.java new file mode 100644 index 0000000000..5685657199 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigActionTagResourcesTagConditionTag.java @@ -0,0 +1,57 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class PreventionDiscoveryConfigActionTagResourcesTagConditionTag { + /** + * @return The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + * + */ + private @Nullable String namespacedValue; + + private PreventionDiscoveryConfigActionTagResourcesTagConditionTag() {} + /** + * @return The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". 
+ * + */ + public Optional namespacedValue() { + return Optional.ofNullable(this.namespacedValue); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(PreventionDiscoveryConfigActionTagResourcesTagConditionTag defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String namespacedValue; + public Builder() {} + public Builder(PreventionDiscoveryConfigActionTagResourcesTagConditionTag defaults) { + Objects.requireNonNull(defaults); + this.namespacedValue = defaults.namespacedValue; + } + + @CustomType.Setter + public Builder namespacedValue(@Nullable String namespacedValue) { + + this.namespacedValue = namespacedValue; + return this; + } + public PreventionDiscoveryConfigActionTagResourcesTagConditionTag build() { + final var _resultValue = new PreventionDiscoveryConfigActionTagResourcesTagConditionTag(); + _resultValue.namespacedValue = namespacedValue; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadence.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadence.java index 6c6d9d48af..ba6ca2098c 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadence.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadence.java @@ -4,6 +4,7 @@ package com.pulumi.gcp.dataloss.outputs; import com.pulumi.core.annotations.CustomType; +import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence; import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence; import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadence; import java.util.Objects; @@ -12,6 +13,12 @@ @CustomType public final class PreventionDiscoveryConfigTargetBigQueryTargetCadence { + /** + * @return Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + * + */ + private @Nullable PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence inspectTemplateModifiedCadence; /** * @return Governs when to update data profiles when a schema is modified * Structure is documented below. @@ -26,6 +33,14 @@ public final class PreventionDiscoveryConfigTargetBigQueryTargetCadence { private @Nullable PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadence tableModifiedCadence; private PreventionDiscoveryConfigTargetBigQueryTargetCadence() {} + /** + * @return Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + * + */ + public Optional inspectTemplateModifiedCadence() { + return Optional.ofNullable(this.inspectTemplateModifiedCadence); + } /** * @return Governs when to update data profiles when a schema is modified * Structure is documented below. 
@@ -52,15 +67,23 @@ public static Builder builder(PreventionDiscoveryConfigTargetBigQueryTargetCaden } @CustomType.Builder public static final class Builder { + private @Nullable PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence inspectTemplateModifiedCadence; private @Nullable PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence schemaModifiedCadence; private @Nullable PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadence tableModifiedCadence; public Builder() {} public Builder(PreventionDiscoveryConfigTargetBigQueryTargetCadence defaults) { Objects.requireNonNull(defaults); + this.inspectTemplateModifiedCadence = defaults.inspectTemplateModifiedCadence; this.schemaModifiedCadence = defaults.schemaModifiedCadence; this.tableModifiedCadence = defaults.tableModifiedCadence; } + @CustomType.Setter + public Builder inspectTemplateModifiedCadence(@Nullable PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence inspectTemplateModifiedCadence) { + + this.inspectTemplateModifiedCadence = inspectTemplateModifiedCadence; + return this; + } @CustomType.Setter public Builder schemaModifiedCadence(@Nullable PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence schemaModifiedCadence) { @@ -75,6 +98,7 @@ public Builder tableModifiedCadence(@Nullable PreventionDiscoveryConfigTargetBig } public PreventionDiscoveryConfigTargetBigQueryTargetCadence build() { final var _resultValue = new PreventionDiscoveryConfigTargetBigQueryTargetCadence(); + _resultValue.inspectTemplateModifiedCadence = inspectTemplateModifiedCadence; _resultValue.schemaModifiedCadence = schemaModifiedCadence; _resultValue.tableModifiedCadence = tableModifiedCadence; return _resultValue; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence.java new file mode 100644 index 0000000000..373d7d013b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence.java @@ -0,0 +1,59 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence { + /** + * @return How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + */ + private @Nullable String frequency; + + private PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence() {} + /** + * @return How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
+ * + */ + public Optional frequency() { + return Optional.ofNullable(this.frequency); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String frequency; + public Builder() {} + public Builder(PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence defaults) { + Objects.requireNonNull(defaults); + this.frequency = defaults.frequency; + } + + @CustomType.Setter + public Builder frequency(@Nullable String frequency) { + + this.frequency = frequency; + return this; + } + public PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence build() { + final var _resultValue = new PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence(); + _resultValue.frequency = frequency; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence.java index ba0bbfc2fb..b4f62e6481 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence.java @@ -4,6 +4,7 @@ package com.pulumi.gcp.dataloss.outputs; import com.pulumi.core.annotations.CustomType; +import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence; import com.pulumi.gcp.dataloss.outputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence; import java.lang.String; import java.util.Objects; @@ -12,6 +13,12 @@ @CustomType public final class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence { + /** + * @return Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + * + */ + private @Nullable PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence inspectTemplateModifiedCadence; /** * @return Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. @@ -26,6 +33,14 @@ public final class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenc private @Nullable PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence schemaModifiedCadence; private PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence() {} + /** + * @return Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. 
+ * + */ + public Optional inspectTemplateModifiedCadence() { + return Optional.ofNullable(this.inspectTemplateModifiedCadence); + } /** * @return Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. @@ -52,15 +67,23 @@ public static Builder builder(PreventionDiscoveryConfigTargetCloudSqlTargetGener } @CustomType.Builder public static final class Builder { + private @Nullable PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence inspectTemplateModifiedCadence; private @Nullable String refreshFrequency; private @Nullable PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence schemaModifiedCadence; public Builder() {} public Builder(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence defaults) { Objects.requireNonNull(defaults); + this.inspectTemplateModifiedCadence = defaults.inspectTemplateModifiedCadence; this.refreshFrequency = defaults.refreshFrequency; this.schemaModifiedCadence = defaults.schemaModifiedCadence; } + @CustomType.Setter + public Builder inspectTemplateModifiedCadence(@Nullable PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence inspectTemplateModifiedCadence) { + + this.inspectTemplateModifiedCadence = inspectTemplateModifiedCadence; + return this; + } @CustomType.Setter public Builder refreshFrequency(@Nullable String refreshFrequency) { @@ -75,6 +98,7 @@ public Builder schemaModifiedCadence(@Nullable PreventionDiscoveryConfigTargetCl } public PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence build() { final var _resultValue = new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence(); + _resultValue.inspectTemplateModifiedCadence = inspectTemplateModifiedCadence; _resultValue.refreshFrequency = refreshFrequency; _resultValue.schemaModifiedCadence = schemaModifiedCadence; return _resultValue; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence.java b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence.java new file mode 100644 index 0000000000..080393ba3e --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataloss/outputs/PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence.java @@ -0,0 +1,60 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.dataloss.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence { + /** + * @return How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
+ * + */ + private String frequency; + + private PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence() {} + /** + * @return How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + * + */ + public String frequency() { + return this.frequency; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String frequency; + public Builder() {} + public Builder(PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence defaults) { + Objects.requireNonNull(defaults); + this.frequency = defaults.frequency; + } + + @CustomType.Setter + public Builder frequency(String frequency) { + if (frequency == null) { + throw new MissingRequiredPropertyException("PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence", "frequency"); + } + this.frequency = frequency; + return this; + } + public PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence build() { + final var _resultValue = new PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence(); + _resultValue.frequency = frequency; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigArgs.java index 7023d826a5..f300fd7501 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigArgs.java @@ -210,14 +210,14 @@ public Optional stagingBucket; /** - * @return A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * @return A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). * */ public Optional> stagingBucket() { @@ -559,7 +559,7 @@ public Builder softwareConfig(WorkflowTemplatePlacementManagedClusterConfigSoftw } /** - * @param stagingBucket A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * @param stagingBucket A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). * * @return builder * @@ -570,7 +570,7 @@ public Builder stagingBucket(@Nullable Output stagingBucket) { } /** - * @param stagingBucket A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * @param stagingBucket A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.java index 0d930e1df4..22de1be6b1 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs.java @@ -37,14 +37,14 @@ public Optional> internalIpOnly() { } /** - * The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + * The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). * */ @Import(name="metadata") private @Nullable Output> metadata; /** - * @return The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + * @return The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). 
* */ public Optional>> metadata() { @@ -172,14 +172,14 @@ public Optional> subnetwork() { } /** - * The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + * The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). * */ @Import(name="tags") private @Nullable Output> tags; /** - * @return The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + * @return The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). * */ public Optional>> tags() { @@ -258,7 +258,7 @@ public Builder internalIpOnly(Boolean internalIpOnly) { } /** - * @param metadata The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + * @param metadata The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). * * @return builder * @@ -269,7 +269,7 @@ public Builder metadata(@Nullable Output> metadata) { } /** - * @param metadata The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + * @param metadata The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). * * @return builder * @@ -457,7 +457,7 @@ public Builder subnetwork(String subnetwork) { } /** - * @param tags The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + * @param tags The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). * * @return builder * @@ -468,7 +468,7 @@ public Builder tags(@Nullable Output> tags) { } /** - * @param tags The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + * @param tags The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). * * @return builder * @@ -478,7 +478,7 @@ public Builder tags(List tags) { } /** - * @param tags The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + * @param tags The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.java index 0aa02e8be7..faa9d05d27 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs.java @@ -31,14 +31,14 @@ public Optional> executableFile() { } /** - * Amount of time executable has to complete. 
Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + * Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. * */ @Import(name="executionTimeout") private @Nullable Output executionTimeout; /** - * @return Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + * @return Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. * */ public Optional> executionTimeout() { @@ -92,7 +92,7 @@ public Builder executableFile(String executableFile) { } /** - * @param executionTimeout Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + * @param executionTimeout Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. * * @return builder * @@ -103,7 +103,7 @@ public Builder executionTimeout(@Nullable Output executionTimeout) { } /** - * @param executionTimeout Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + * @param executionTimeout Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. 
* * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.java index 79ceb6c1bf..625b8b9b7b 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs.java @@ -16,14 +16,14 @@ public final class WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigA public static final WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs Empty = new WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs(); /** - * The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ @Import(name="autoDeleteTime") private @Nullable Output autoDeleteTime; /** - * @return The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @return The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ public Optional> autoDeleteTime() { @@ -31,14 +31,14 @@ public Optional> autoDeleteTime() { } /** - * The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ @Import(name="autoDeleteTtl") private @Nullable Output autoDeleteTtl; /** - * @return The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @return The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ public Optional> autoDeleteTtl() { @@ -46,14 +46,14 @@ public Optional> autoDeleteTtl() { } /** - * The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + * The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. 
Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). * */ @Import(name="idleDeleteTtl") private @Nullable Output idleDeleteTtl; /** - * @return The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + * @return The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). * */ public Optional> idleDeleteTtl() { @@ -61,14 +61,14 @@ public Optional> idleDeleteTtl() { } /** - * Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ @Import(name="idleStartTime") private @Nullable Output idleStartTime; /** - * @return Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @return Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ public Optional> idleStartTime() { @@ -103,7 +103,7 @@ public Builder(WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs } /** - * @param autoDeleteTime The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @param autoDeleteTime The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * * @return builder * @@ -114,7 +114,7 @@ public Builder autoDeleteTime(@Nullable Output autoDeleteTime) { } /** - * @param autoDeleteTime The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @param autoDeleteTime The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * * @return builder * @@ -124,7 +124,7 @@ public Builder autoDeleteTime(String autoDeleteTime) { } /** - * @param autoDeleteTtl The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @param autoDeleteTtl The lifetime duration of cluster. 
The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * * @return builder * @@ -135,7 +135,7 @@ public Builder autoDeleteTtl(@Nullable Output autoDeleteTtl) { } /** - * @param autoDeleteTtl The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @param autoDeleteTtl The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * * @return builder * @@ -145,7 +145,7 @@ public Builder autoDeleteTtl(String autoDeleteTtl) { } /** - * @param idleDeleteTtl The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + * @param idleDeleteTtl The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). * * @return builder * @@ -156,7 +156,7 @@ public Builder idleDeleteTtl(@Nullable Output idleDeleteTtl) { } /** - * @param idleDeleteTtl The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + * @param idleDeleteTtl The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). * * @return builder * @@ -166,7 +166,7 @@ public Builder idleDeleteTtl(String idleDeleteTtl) { } /** - * @param idleStartTime Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @param idleStartTime Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * * @return builder * @@ -177,7 +177,7 @@ public Builder idleStartTime(@Nullable Output idleStartTime) { } /** - * @param idleStartTime Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @param idleStartTime Output only. 
The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.java index a32ab5eaa8..a422935ad2 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/inputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs.java @@ -127,14 +127,14 @@ public Optional minCpuPlatform; /** - * @return Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + * @return Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). * */ public Optional> minCpuPlatform() { @@ -382,7 +382,7 @@ public Builder managedGroupConfigs(WorkflowTemplatePlacementManagedClusterConfig } /** - * @param minCpuPlatform Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + * @param minCpuPlatform Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). * * @return builder * @@ -393,7 +393,7 @@ public Builder minCpuPlatform(@Nullable Output minCpuPlatform) { } /** - * @param minCpuPlatform Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + * @param minCpuPlatform Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfig.java index f8c2b2d8f4..2c098597a9 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfig.java @@ -86,7 +86,7 @@ public final class WorkflowTemplatePlacementManagedClusterConfig { */ private @Nullable WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig softwareConfig; /** - * @return A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * @return A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). * */ private @Nullable String stagingBucket; @@ -189,7 +189,7 @@ public Optional sof return Optional.ofNullable(this.softwareConfig); } /** - * @return A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * @return A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). * */ public Optional stagingBucket() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig.java index 5805c0fb9a..1389cbd09e 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig.java @@ -23,7 +23,7 @@ public final class WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig */ private @Nullable Boolean internalIpOnly; /** - * @return The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + * @return The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). * */ private @Nullable Map metadata; @@ -68,7 +68,7 @@ public final class WorkflowTemplatePlacementManagedClusterConfigGceClusterConfig */ private @Nullable String subnetwork; /** - * @return The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + * @return The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). * */ private @Nullable List tags; @@ -87,7 +87,7 @@ public Optional internalIpOnly() { return Optional.ofNullable(this.internalIpOnly); } /** - * @return The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). 
+ * @return The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). * */ public Map metadata() { @@ -150,7 +150,7 @@ public Optional subnetwork() { return Optional.ofNullable(this.subnetwork); } /** - * @return The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + * @return The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). * */ public List tags() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigInitializationAction.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigInitializationAction.java index 712f6fa909..37a7209947 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigInitializationAction.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigInitializationAction.java @@ -17,7 +17,7 @@ public final class WorkflowTemplatePlacementManagedClusterConfigInitializationAc */ private @Nullable String executableFile; /** - * @return Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + * @return Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. * */ private @Nullable String executionTimeout; @@ -31,7 +31,7 @@ public Optional executableFile() { return Optional.ofNullable(this.executableFile); } /** - * @return Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + * @return Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. 
* */ public Optional executionTimeout() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig.java index e38bd1af6a..6c2080426b 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig.java @@ -12,50 +12,50 @@ @CustomType public final class WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { /** - * @return The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @return The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ private @Nullable String autoDeleteTime; /** - * @return The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @return The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ private @Nullable String autoDeleteTtl; /** - * @return The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + * @return The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). * */ private @Nullable String idleDeleteTtl; /** - * @return Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @return Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ private @Nullable String idleStartTime; private WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig() {} /** - * @return The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @return The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ public Optional autoDeleteTime() { return Optional.ofNullable(this.autoDeleteTime); } /** - * @return The lifetime duration of cluster. 
The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @return The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ public Optional autoDeleteTtl() { return Optional.ofNullable(this.autoDeleteTtl); } /** - * @return The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + * @return The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). * */ public Optional idleDeleteTtl() { return Optional.ofNullable(this.idleDeleteTtl); } /** - * @return Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * @return Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). * */ public Optional idleStartTime() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfig.java index 7df13cb367..13d3478245 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/dataproc/outputs/WorkflowTemplatePlacementManagedClusterConfigMasterConfig.java @@ -53,7 +53,7 @@ public final class WorkflowTemplatePlacementManagedClusterConfigMasterConfig { */ private @Nullable List managedGroupConfigs; /** - * @return Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + * @return Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). 
* */ private @Nullable String minCpuPlatform; @@ -119,7 +119,7 @@ public List minCpuPlatform() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/datastream/Stream.java b/sdk/java/src/main/java/com/pulumi/gcp/datastream/Stream.java index 33a5d478fd..a9f9e322c9 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/datastream/Stream.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/datastream/Stream.java @@ -558,6 +558,7 @@ * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs; * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigArgs; * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs; + * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs; * import com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs; * import com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs; * import com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs; @@ -654,6 +655,153 @@ * .build()) * .build()) * .build()) + * .transactionLogs() + * .build()) + * .build()) + * .destinationConfig(StreamDestinationConfigArgs.builder() + * .destinationConnectionProfile(destination.id()) + * .bigqueryDestinationConfig(StreamDestinationConfigBigqueryDestinationConfigArgs.builder() + * .dataFreshness("900s") + * .sourceHierarchyDatasets(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs.builder() + * .datasetTemplate(StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs.builder() + * .location("us-central1") + * .build()) + * .build()) + * .build()) + * .build()) + * .backfillNone() + * .build()); + * + * } + * } + * } + * + * <!--End PulumiCodeChooser --> + * ### Datastream Stream Sql Server Change Tables + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.sql.DatabaseInstance;
+ * import com.pulumi.gcp.sql.DatabaseInstanceArgs;
+ * import com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsArgs;
+ * import com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationArgs;
+ * import com.pulumi.gcp.sql.inputs.DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs;
+ * import com.pulumi.gcp.sql.User;
+ * import com.pulumi.gcp.sql.UserArgs;
+ * import com.pulumi.gcp.sql.Database;
+ * import com.pulumi.gcp.sql.DatabaseArgs;
+ * import com.pulumi.gcp.datastream.ConnectionProfile;
+ * import com.pulumi.gcp.datastream.ConnectionProfileArgs;
+ * import com.pulumi.gcp.datastream.inputs.ConnectionProfileSqlServerProfileArgs;
+ * import com.pulumi.gcp.datastream.inputs.ConnectionProfileBigqueryProfileArgs;
+ * import com.pulumi.gcp.datastream.Stream;
+ * import com.pulumi.gcp.datastream.StreamArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigChangeTablesArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamDestinationConfigArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateArgs;
+ * import com.pulumi.gcp.datastream.inputs.StreamBackfillNoneArgs;
+ * import com.pulumi.resources.CustomResourceOptions;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var instance = new DatabaseInstance("instance", DatabaseInstanceArgs.builder()
+ *             .name("sql-server")
+ *             .databaseVersion("SQLSERVER_2019_STANDARD")
+ *             .region("us-central1")
+ *             .rootPassword("root-password")
+ *             .deletionProtection(true)
+ *             .settings(DatabaseInstanceSettingsArgs.builder()
+ *                 .tier("db-custom-2-4096")
+ *                 .ipConfiguration(DatabaseInstanceSettingsIpConfigurationArgs.builder()
+ *                     .authorizedNetworks(                    
+ *                         DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()
+ *                             .value("34.71.242.81")
+ *                             .build(),
+ *                         DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()
+ *                             .value("34.72.28.29")
+ *                             .build(),
+ *                         DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()
+ *                             .value("34.67.6.157")
+ *                             .build(),
+ *                         DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()
+ *                             .value("34.67.234.134")
+ *                             .build(),
+ *                         DatabaseInstanceSettingsIpConfigurationAuthorizedNetworkArgs.builder()
+ *                             .value("34.72.239.218")
+ *                             .build())
+ *                     .build())
+ *                 .build())
+ *             .build());
+ * 
+ *         var user = new User("user", UserArgs.builder()
+ *             .name("user")
+ *             .instance(instance.name())
+ *             .password("password")
+ *             .build());
+ * 
+ *         var db = new Database("db", DatabaseArgs.builder()
+ *             .name("db")
+ *             .instance(instance.name())
+ *             .build(), CustomResourceOptions.builder()
+ *                 .dependsOn(user)
+ *                 .build());
+ * 
+ *         var source = new ConnectionProfile("source", ConnectionProfileArgs.builder()
+ *             .displayName("SQL Server Source")
+ *             .location("us-central1")
+ *             .connectionProfileId("source-profile")
+ *             .sqlServerProfile(ConnectionProfileSqlServerProfileArgs.builder()
+ *                 .hostname(instance.publicIpAddress())
+ *                 .port(1433)
+ *                 .username(user.name())
+ *                 .password(user.password())
+ *                 .database(db.name())
+ *                 .build())
+ *             .build());
+ * 
+ *         var destination = new ConnectionProfile("destination", ConnectionProfileArgs.builder()
+ *             .displayName("BigQuery Destination")
+ *             .location("us-central1")
+ *             .connectionProfileId("destination-profile")
+ *             .bigqueryProfile(ConnectionProfileBigqueryProfileArgs.builder().build())
+ *             .build());
+ * 
+ *         var default_ = new Stream("default", StreamArgs.builder()
+ *             .displayName("SQL Server to BigQuery")
+ *             .location("us-central1")
+ *             .streamId("stream")
+ *             .sourceConfig(StreamSourceConfigArgs.builder()
+ *                 .sourceConnectionProfile(source.id())
+ *                 .sqlServerSourceConfig(StreamSourceConfigSqlServerSourceConfigArgs.builder()
+ *                     .includeObjects(StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs.builder()
+ *                         .schemas(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaArgs.builder()
+ *                             .schema("schema")
+ *                             .tables(StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgs.builder()
+ *                                 .table("table")
+ *                                 .build())
+ *                             .build())
+ *                         .build())
+ *                     .changeTables(StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.builder().build())
  *                     .build())
  *                 .build())
  *             .destinationConfig(StreamDestinationConfigArgs.builder()
@@ -1215,14 +1363,16 @@ public Output<Optional<String>> customerManagedEncryptionKey() {
         return Codegen.optional(this.customerManagedEncryptionKey);
     }
     /**
-     * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+     * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+     * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
      * 
      */
     @Export(name="desiredState", refs={String.class}, tree="[0]")
     private Output</* @Nullable */ String> desiredState;
 
     /**
-     * @return Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+     * @return Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+     * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
      * 
      */
     public Output<Optional<String>> desiredState() {
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/datastream/StreamArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/datastream/StreamArgs.java
index fa9c324ed0..6299b5c1cd 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/datastream/StreamArgs.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/datastream/StreamArgs.java
@@ -85,14 +85,16 @@ public Optional<Output<String>> customerManagedEncryptionKey() {
     }
 
     /**
-     * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+     * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+     * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
      * 
      */
     @Import(name="desiredState")
     private @Nullable Output<String> desiredState;
 
     /**
-     * @return Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+     * @return Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+     * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
      * 
      */
     public Optional<Output<String>> desiredState() {
@@ -324,7 +326,8 @@ public Builder customerManagedEncryptionKey(String customerManagedEncryptionKey)
         }
 
         /**
-         * @param desiredState Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+         * @param desiredState Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+         * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
          * 
          * @return builder
          * 
@@ -335,7 +338,8 @@ public Builder desiredState(@Nullable Output<String> desiredState) {
         }
 
         /**
-         * @param desiredState Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+         * @param desiredState Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+         * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
          * 
          * @return builder
          * 
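For reference, a minimal sketch of setting the desiredState documented above, assuming the standard Pulumi Java entrypoint; the stream and display names are illustrative, and the required sourceConfig, destinationConfig, and backfill settings (shown in the full Stream.java example earlier) are omitted for brevity.

// Sketch only: starts a Datastream stream on creation by setting desiredState.
// The resource name is illustrative; the required source/destination configuration
// is elided and must be supplied as in the full example above.
import com.pulumi.Pulumi;
import com.pulumi.gcp.datastream.Stream;
import com.pulumi.gcp.datastream.StreamArgs;

public class DesiredStateSketch {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            new Stream("sqlserver-to-bigquery", StreamArgs.builder()
                .displayName("SQL Server to BigQuery")
                .location("us-central1")
                .streamId("stream")
                // One of NOT_STARTED (default), RUNNING, or PAUSED.
                .desiredState("RUNNING")
                .build());
        });
    }
}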
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigArgs.java
index dfad985afb..ea63ee34b2 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigArgs.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigArgs.java
@@ -5,8 +5,10 @@
 
 import com.pulumi.core.Output;
 import com.pulumi.core.annotations.Import;
+import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigChangeTablesArgs;
 import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgs;
 import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs;
+import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs;
 import java.lang.Integer;
 import java.util.Objects;
 import java.util.Optional;
@@ -17,6 +19,21 @@ public final class StreamSourceConfigSqlServerSourceConfigArgs extends com.pulum
 
     public static final StreamSourceConfigSqlServerSourceConfigArgs Empty = new StreamSourceConfigSqlServerSourceConfigArgs();
 
+    /**
+     * CDC reader reads from change tables.
+     * 
+     */
+    @Import(name="changeTables")
+    private @Nullable Output<StreamSourceConfigSqlServerSourceConfigChangeTablesArgs> changeTables;
+
+    /**
+     * @return CDC reader reads from change tables.
+     * 
+     */
+    public Optional<Output<StreamSourceConfigSqlServerSourceConfigChangeTablesArgs>> changeTables() {
+        return Optional.ofNullable(this.changeTables);
+    }
+
     /**
      * SQL Server objects to exclude from the stream.
      * Structure is documented below.
@@ -81,13 +98,30 @@ public Optional<Output<Integer>> maxConcurrentCdcTasks() {
         return Optional.ofNullable(this.maxConcurrentCdcTasks);
     }
 
+    /**
+     * CDC reader reads from transaction logs.
+     * 
+     */
+    @Import(name="transactionLogs")
+    private @Nullable Output<StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs> transactionLogs;
+
+    /**
+     * @return CDC reader reads from transaction logs.
+     * 
+     */
+    public Optional<Output<StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs>> transactionLogs() {
+        return Optional.ofNullable(this.transactionLogs);
+    }
+
     private StreamSourceConfigSqlServerSourceConfigArgs() {}
 
     private StreamSourceConfigSqlServerSourceConfigArgs(StreamSourceConfigSqlServerSourceConfigArgs $) {
+        this.changeTables = $.changeTables;
         this.excludeObjects = $.excludeObjects;
         this.includeObjects = $.includeObjects;
         this.maxConcurrentBackfillTasks = $.maxConcurrentBackfillTasks;
         this.maxConcurrentCdcTasks = $.maxConcurrentCdcTasks;
+        this.transactionLogs = $.transactionLogs;
     }
 
     public static Builder builder() {
@@ -108,6 +142,27 @@ public Builder(StreamSourceConfigSqlServerSourceConfigArgs defaults) {
             $ = new StreamSourceConfigSqlServerSourceConfigArgs(Objects.requireNonNull(defaults));
         }
 
+        /**
+         * @param changeTables CDC reader reads from change tables.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder changeTables(@Nullable Output<StreamSourceConfigSqlServerSourceConfigChangeTablesArgs> changeTables) {
+            $.changeTables = changeTables;
+            return this;
+        }
+
+        /**
+         * @param changeTables CDC reader reads from change tables.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder changeTables(StreamSourceConfigSqlServerSourceConfigChangeTablesArgs changeTables) {
+            return changeTables(Output.of(changeTables));
+        }
+
         /**
          * @param excludeObjects SQL Server objects to exclude from the stream.
          * Structure is documented below.
@@ -196,6 +251,27 @@ public Builder maxConcurrentCdcTasks(Integer maxConcurrentCdcTasks) {
             return maxConcurrentCdcTasks(Output.of(maxConcurrentCdcTasks));
         }
 
+        /**
+         * @param transactionLogs CDC reader reads from transaction logs.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder transactionLogs(@Nullable Output<StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs> transactionLogs) {
+            $.transactionLogs = transactionLogs;
+            return this;
+        }
+
+        /**
+         * @param transactionLogs CDC reader reads from transaction logs.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder transactionLogs(StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs transactionLogs) {
+            return transactionLogs(Output.of(transactionLogs));
+        }
+
         public StreamSourceConfigSqlServerSourceConfigArgs build() {
             return $;
         }
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.java
new file mode 100644
index 0000000000..3e42e856b8
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.java
@@ -0,0 +1,28 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.datastream.inputs;
+
+
+
+
+public final class StreamSourceConfigSqlServerSourceConfigChangeTablesArgs extends com.pulumi.resources.ResourceArgs {
+
+    public static final StreamSourceConfigSqlServerSourceConfigChangeTablesArgs Empty = new StreamSourceConfigSqlServerSourceConfigChangeTablesArgs();
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static final class Builder {
+        private StreamSourceConfigSqlServerSourceConfigChangeTablesArgs $;
+
+        public Builder() {
+            $ = new StreamSourceConfigSqlServerSourceConfigChangeTablesArgs();
+        }
+        public StreamSourceConfigSqlServerSourceConfigChangeTablesArgs build() {
+            return $;
+        }
+    }
+
+}
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs.java
new file mode 100644
index 0000000000..1acba0232b
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs.java
@@ -0,0 +1,28 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.datastream.inputs;
+
+
+
+
+public final class StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs extends com.pulumi.resources.ResourceArgs {
+
+    public static final StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs Empty = new StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs();
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static final class Builder {
+        private StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs $;
+
+        public Builder() {
+            $ = new StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs();
+        }
+        public StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs build() {
+            return $;
+        }
+    }
+
+}
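For reference, a minimal sketch of how the new changeTables and transactionLogs marker args select the CDC method on a SQL Server source config; only builders that appear in this diff are used, and the wrapper class name is illustrative.

// Sketch only: the two CDC-method variants for a Datastream SQL Server source config.
import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigArgs;
import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigChangeTablesArgs;
import com.pulumi.gcp.datastream.inputs.StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs;

public class SqlServerCdcMethodSketch {
    // CDC reader reads from change tables.
    static StreamSourceConfigSqlServerSourceConfigArgs viaChangeTables() {
        return StreamSourceConfigSqlServerSourceConfigArgs.builder()
            .changeTables(StreamSourceConfigSqlServerSourceConfigChangeTablesArgs.builder().build())
            .build();
    }

    // CDC reader reads from transaction logs.
    static StreamSourceConfigSqlServerSourceConfigArgs viaTransactionLogs() {
        return StreamSourceConfigSqlServerSourceConfigArgs.builder()
            .transactionLogs(StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs.builder().build())
            .build();
    }
}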
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamState.java b/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamState.java
index ea8afe728f..9cffa4907b 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamState.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/datastream/inputs/StreamState.java
@@ -84,14 +84,16 @@ public Optional<Output<String>> customerManagedEncryptionKey() {
     }
 
     /**
-     * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+     * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+     * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
      * 
      */
     @Import(name="desiredState")
     private @Nullable Output<String> desiredState;
 
     /**
-     * @return Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+     * @return Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+     * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
      * 
      */
     public Optional<Output<String>> desiredState() {
@@ -389,7 +391,8 @@ public Builder customerManagedEncryptionKey(String customerManagedEncryptionKey)
         }
 
         /**
-         * @param desiredState Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+         * @param desiredState Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+         * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
          * 
          * @return builder
          * 
@@ -400,7 +403,8 @@ public Builder desiredState(@Nullable Output<String> desiredState) {
         }
 
         /**
-         * @param desiredState Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.
+         * @param desiredState Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible
+         * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED
          * 
          * @return builder
          * 
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfig.java
index ef3afdccb3..77fc629c04 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfig.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfig.java
@@ -4,8 +4,10 @@
 package com.pulumi.gcp.datastream.outputs;
 
 import com.pulumi.core.annotations.CustomType;
+import com.pulumi.gcp.datastream.outputs.StreamSourceConfigSqlServerSourceConfigChangeTables;
 import com.pulumi.gcp.datastream.outputs.StreamSourceConfigSqlServerSourceConfigExcludeObjects;
 import com.pulumi.gcp.datastream.outputs.StreamSourceConfigSqlServerSourceConfigIncludeObjects;
+import com.pulumi.gcp.datastream.outputs.StreamSourceConfigSqlServerSourceConfigTransactionLogs;
 import java.lang.Integer;
 import java.util.Objects;
 import java.util.Optional;
@@ -13,6 +15,11 @@
 
 @CustomType
 public final class StreamSourceConfigSqlServerSourceConfig {
+    /**
+     * @return CDC reader reads from change tables.
+     * 
+     */
+    private @Nullable StreamSourceConfigSqlServerSourceConfigChangeTables changeTables;
     /**
      * @return SQL Server objects to exclude from the stream.
      * Structure is documented below.
@@ -35,8 +42,20 @@ public final class StreamSourceConfigSqlServerSourceConfig {
      * 
      */
     private @Nullable Integer maxConcurrentCdcTasks;
+    /**
+     * @return CDC reader reads from transaction logs.
+     * 
+     */
+    private @Nullable StreamSourceConfigSqlServerSourceConfigTransactionLogs transactionLogs;
 
     private StreamSourceConfigSqlServerSourceConfig() {}
+    /**
+     * @return CDC reader reads from change tables.
+     * 
+     */
+    public Optional<StreamSourceConfigSqlServerSourceConfigChangeTables> changeTables() {
+        return Optional.ofNullable(this.changeTables);
+    }
     /**
      * @return SQL Server objects to exclude from the stream.
      * Structure is documented below.
@@ -67,6 +86,13 @@ public Optional maxConcurrentBackfillTasks() {
     public Optional<Integer> maxConcurrentCdcTasks() {
         return Optional.ofNullable(this.maxConcurrentCdcTasks);
     }
+    /**
+     * @return CDC reader reads from transaction logs.
+     * 
+     */
+    public Optional<StreamSourceConfigSqlServerSourceConfigTransactionLogs> transactionLogs() {
+        return Optional.ofNullable(this.transactionLogs);
+    }
 
     public static Builder builder() {
         return new Builder();
@@ -77,19 +103,29 @@ public static Builder builder(StreamSourceConfigSqlServerSourceConfig defaults)
     }
     @CustomType.Builder
     public static final class Builder {
+        private @Nullable StreamSourceConfigSqlServerSourceConfigChangeTables changeTables;
         private @Nullable StreamSourceConfigSqlServerSourceConfigExcludeObjects excludeObjects;
         private @Nullable StreamSourceConfigSqlServerSourceConfigIncludeObjects includeObjects;
         private @Nullable Integer maxConcurrentBackfillTasks;
         private @Nullable Integer maxConcurrentCdcTasks;
+        private @Nullable StreamSourceConfigSqlServerSourceConfigTransactionLogs transactionLogs;
         public Builder() {}
         public Builder(StreamSourceConfigSqlServerSourceConfig defaults) {
     	      Objects.requireNonNull(defaults);
+    	      this.changeTables = defaults.changeTables;
     	      this.excludeObjects = defaults.excludeObjects;
     	      this.includeObjects = defaults.includeObjects;
     	      this.maxConcurrentBackfillTasks = defaults.maxConcurrentBackfillTasks;
     	      this.maxConcurrentCdcTasks = defaults.maxConcurrentCdcTasks;
+    	      this.transactionLogs = defaults.transactionLogs;
         }
 
+        @CustomType.Setter
+        public Builder changeTables(@Nullable StreamSourceConfigSqlServerSourceConfigChangeTables changeTables) {
+
+            this.changeTables = changeTables;
+            return this;
+        }
         @CustomType.Setter
         public Builder excludeObjects(@Nullable StreamSourceConfigSqlServerSourceConfigExcludeObjects excludeObjects) {
 
@@ -114,12 +150,20 @@ public Builder maxConcurrentCdcTasks(@Nullable Integer maxConcurrentCdcTasks) {
             this.maxConcurrentCdcTasks = maxConcurrentCdcTasks;
             return this;
         }
+        @CustomType.Setter
+        public Builder transactionLogs(@Nullable StreamSourceConfigSqlServerSourceConfigTransactionLogs transactionLogs) {
+
+            this.transactionLogs = transactionLogs;
+            return this;
+        }
         public StreamSourceConfigSqlServerSourceConfig build() {
             final var _resultValue = new StreamSourceConfigSqlServerSourceConfig();
+            _resultValue.changeTables = changeTables;
             _resultValue.excludeObjects = excludeObjects;
             _resultValue.includeObjects = includeObjects;
             _resultValue.maxConcurrentBackfillTasks = maxConcurrentBackfillTasks;
             _resultValue.maxConcurrentCdcTasks = maxConcurrentCdcTasks;
+            _resultValue.transactionLogs = transactionLogs;
             return _resultValue;
         }
     }
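
For illustration, a minimal sketch of selecting the new CDC mode through the builders above; only one of the two (empty) marker blocks is set here, and the values are examples:

    var sqlServerSource = StreamSourceConfigSqlServerSourceConfig.builder()
        // CDC reader reads from change tables; transactionLogs(...) would select log-based CDC instead.
        .changeTables(StreamSourceConfigSqlServerSourceConfigChangeTables.builder().build())
        .maxConcurrentCdcTasks(5)
        .build();
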
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfigChangeTables.java b/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfigChangeTables.java
new file mode 100644
index 0000000000..31db5f098f
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfigChangeTables.java
@@ -0,0 +1,32 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.datastream.outputs;
+
+import com.pulumi.core.annotations.CustomType;
+import java.util.Objects;
+
+@CustomType
+public final class StreamSourceConfigSqlServerSourceConfigChangeTables {
+    private StreamSourceConfigSqlServerSourceConfigChangeTables() {}
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static Builder builder(StreamSourceConfigSqlServerSourceConfigChangeTables defaults) {
+        return new Builder(defaults);
+    }
+    @CustomType.Builder
+    public static final class Builder {
+        public Builder() {}
+        public Builder(StreamSourceConfigSqlServerSourceConfigChangeTables defaults) {
+    	      Objects.requireNonNull(defaults);
+        }
+
+        public StreamSourceConfigSqlServerSourceConfigChangeTables build() {
+            final var _resultValue = new StreamSourceConfigSqlServerSourceConfigChangeTables();
+            return _resultValue;
+        }
+    }
+}
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfigTransactionLogs.java b/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfigTransactionLogs.java
new file mode 100644
index 0000000000..cf07f5a274
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/datastream/outputs/StreamSourceConfigSqlServerSourceConfigTransactionLogs.java
@@ -0,0 +1,32 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.datastream.outputs;
+
+import com.pulumi.core.annotations.CustomType;
+import java.util.Objects;
+
+@CustomType
+public final class StreamSourceConfigSqlServerSourceConfigTransactionLogs {
+    private StreamSourceConfigSqlServerSourceConfigTransactionLogs() {}
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static Builder builder(StreamSourceConfigSqlServerSourceConfigTransactionLogs defaults) {
+        return new Builder(defaults);
+    }
+    @CustomType.Builder
+    public static final class Builder {
+        public Builder() {}
+        public Builder(StreamSourceConfigSqlServerSourceConfigTransactionLogs defaults) {
+    	      Objects.requireNonNull(defaults);
+        }
+
+        public StreamSourceConfigSqlServerSourceConfigTransactionLogs build() {
+            final var _resultValue = new StreamSourceConfigSqlServerSourceConfigTransactionLogs();
+            return _resultValue;
+        }
+    }
+}
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/DataStore.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/DataStore.java
index 2dfafa2eb3..82e403452f 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/DataStore.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/DataStore.java
@@ -267,7 +267,7 @@ public Output> documentProcessingCon
     }
     /**
      * The industry vertical that the data store registers.
-     * Possible values are: `GENERIC`, `MEDIA`.
+     * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
      * 
      */
     @Export(name="industryVertical", refs={String.class}, tree="[0]")
@@ -275,7 +275,7 @@ public Output> documentProcessingCon
 
     /**
      * @return The industry vertical that the data store registers.
-     * Possible values are: `GENERIC`, `MEDIA`.
+     * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
      * 
      */
     public Output<String> industryVertical() {
@@ -361,7 +361,7 @@ public Output> skipDefaultSchemaCreation() {
     }
     /**
      * The solutions that the data store enrolls.
-     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
      * 
      */
     @Export(name="solutionTypes", refs={List.class,String.class}, tree="[0,1]")
@@ -369,7 +369,7 @@ public Output> skipDefaultSchemaCreation() {
 
     /**
      * @return The solutions that the data store enrolls.
-     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
      * 
      */
     public Output<Optional<List<String>>> solutionTypes() {
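
For illustration, a minimal sketch using the widened enums; the resource name and the omitted required DataStore arguments (dataStoreId, location, displayName, contentConfig, ...) are placeholders:

    import java.util.List;
    import com.pulumi.gcp.discoveryengine.DataStore;
    import com.pulumi.gcp.discoveryengine.DataStoreArgs;

    var clinicalStore = new DataStore("clinical-store", DataStoreArgs.builder()
        .industryVertical("HEALTHCARE_FHIR")                     // newly documented value
        .solutionTypes(List.of("SOLUTION_TYPE_GENERATIVE_CHAT")) // newly documented value
        // ...required arguments go here...
        .build());
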
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/DataStoreArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/DataStoreArgs.java
index e14c227e1f..09d8bf521d 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/DataStoreArgs.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/DataStoreArgs.java
@@ -110,7 +110,7 @@ public Optional> documentProcessin
 
     /**
      * The industry vertical that the data store registers.
-     * Possible values are: `GENERIC`, `MEDIA`.
+     * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
      * 
      */
     @Import(name="industryVertical", required=true)
@@ -118,7 +118,7 @@ public Optional> documentProcessin
 
     /**
      * @return The industry vertical that the data store registers.
-     * Possible values are: `GENERIC`, `MEDIA`.
+     * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
      * 
      */
     public Output<String> industryVertical() {
@@ -188,7 +188,7 @@ public Optional> skipDefaultSchemaCreation() {
 
     /**
      * The solutions that the data store enrolls.
-     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
      * 
      */
     @Import(name="solutionTypes")
@@ -196,7 +196,7 @@ public Optional> skipDefaultSchemaCreation() {
 
     /**
      * @return The solutions that the data store enrolls.
-     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
      * 
      */
     public Optional<Output<List<String>>> solutionTypes() {
@@ -357,7 +357,7 @@ public Builder documentProcessingConfig(DataStoreDocumentProcessingConfigArgs do
 
         /**
          * @param industryVertical The industry vertical that the data store registers.
-         * Possible values are: `GENERIC`, `MEDIA`.
+         * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
          * 
          * @return builder
          * 
@@ -369,7 +369,7 @@ public Builder industryVertical(Output industryVertical) {
 
         /**
          * @param industryVertical The industry vertical that the data store registers.
-         * Possible values are: `GENERIC`, `MEDIA`.
+         * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
          * 
          * @return builder
          * 
@@ -459,7 +459,7 @@ public Builder skipDefaultSchemaCreation(Boolean skipDefaultSchemaCreation) {
 
         /**
          * @param solutionTypes The solutions that the data store enrolls.
-         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
          * 
          * @return builder
          * 
@@ -471,7 +471,7 @@ public Builder solutionTypes(@Nullable Output> solutionTypes) {
 
         /**
          * @param solutionTypes The solutions that the data store enrolls.
-         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
          * 
          * @return builder
          * 
@@ -482,7 +482,7 @@ public Builder solutionTypes(List solutionTypes) {
 
         /**
          * @param solutionTypes The solutions that the data store enrolls.
-         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
          * 
          * @return builder
          * 
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigArgs.java
index fc88a94b54..5e96bfa2fb 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigArgs.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigArgs.java
@@ -5,6 +5,7 @@
 
 import com.pulumi.core.Output;
 import com.pulumi.core.annotations.Import;
+import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigChunkingConfigArgs;
 import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigDefaultParsingConfigArgs;
 import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigParsingConfigOverrideArgs;
 import java.lang.String;
@@ -18,6 +19,23 @@ public final class DataStoreDocumentProcessingConfigArgs extends com.pulumi.reso
 
     public static final DataStoreDocumentProcessingConfigArgs Empty = new DataStoreDocumentProcessingConfigArgs();
 
+    /**
+     * Whether chunking mode is enabled.
+     * Structure is documented below.
+     * 
+     */
+    @Import(name="chunkingConfig")
+    private @Nullable Output<DataStoreDocumentProcessingConfigChunkingConfigArgs> chunkingConfig;
+
+    /**
+     * @return Whether chunking mode is enabled.
+     * Structure is documented below.
+     * 
+     */
+    public Optional<Output<DataStoreDocumentProcessingConfigChunkingConfigArgs>> chunkingConfig() {
+        return Optional.ofNullable(this.chunkingConfig);
+    }
+
     /**
      * Configurations for default Document parser. If not specified, this resource
      * will be configured to use a default DigitalParsingConfig, and the default parsing
@@ -76,6 +94,7 @@
+        /**
+         * @param chunkingConfig Whether chunking mode is enabled.
+         * Structure is documented below.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder chunkingConfig(@Nullable Output<DataStoreDocumentProcessingConfigChunkingConfigArgs> chunkingConfig) {
+            $.chunkingConfig = chunkingConfig;
+            return this;
+        }
+
+        /**
+         * @param chunkingConfig Whether chunking mode is enabled.
+         * Structure is documented below.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder chunkingConfig(DataStoreDocumentProcessingConfigChunkingConfigArgs chunkingConfig) {
+            return chunkingConfig(Output.of(chunkingConfig));
+        }
+
         /**
          * @param defaultParsingConfig Configurations for default Document parser. If not specified, this resource
          * will be configured to use a default DigitalParsingConfig, and the default parsing
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigChunkingConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigChunkingConfigArgs.java
new file mode 100644
index 0000000000..b01c8e2de5
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigChunkingConfigArgs.java
@@ -0,0 +1,87 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.discoveryengine.inputs;
+
+import com.pulumi.core.Output;
+import com.pulumi.core.annotations.Import;
+import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs;
+import java.util.Objects;
+import java.util.Optional;
+import javax.annotation.Nullable;
+
+
+public final class DataStoreDocumentProcessingConfigChunkingConfigArgs extends com.pulumi.resources.ResourceArgs {
+
+    public static final DataStoreDocumentProcessingConfigChunkingConfigArgs Empty = new DataStoreDocumentProcessingConfigChunkingConfigArgs();
+
+    /**
+     * Configuration for the layout based chunking.
+     * Structure is documented below.
+     * 
+     */
+    @Import(name="layoutBasedChunkingConfig")
+    private @Nullable Output<DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs> layoutBasedChunkingConfig;
+
+    /**
+     * @return Configuration for the layout based chunking.
+     * Structure is documented below.
+     * 
+     */
+    public Optional<Output<DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs>> layoutBasedChunkingConfig() {
+        return Optional.ofNullable(this.layoutBasedChunkingConfig);
+    }
+
+    private DataStoreDocumentProcessingConfigChunkingConfigArgs() {}
+
+    private DataStoreDocumentProcessingConfigChunkingConfigArgs(DataStoreDocumentProcessingConfigChunkingConfigArgs $) {
+        this.layoutBasedChunkingConfig = $.layoutBasedChunkingConfig;
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+    public static Builder builder(DataStoreDocumentProcessingConfigChunkingConfigArgs defaults) {
+        return new Builder(defaults);
+    }
+
+    public static final class Builder {
+        private DataStoreDocumentProcessingConfigChunkingConfigArgs $;
+
+        public Builder() {
+            $ = new DataStoreDocumentProcessingConfigChunkingConfigArgs();
+        }
+
+        public Builder(DataStoreDocumentProcessingConfigChunkingConfigArgs defaults) {
+            $ = new DataStoreDocumentProcessingConfigChunkingConfigArgs(Objects.requireNonNull(defaults));
+        }
+
+        /**
+         * @param layoutBasedChunkingConfig Configuration for the layout based chunking.
+         * Structure is documented below.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder layoutBasedChunkingConfig(@Nullable Output<DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs> layoutBasedChunkingConfig) {
+            $.layoutBasedChunkingConfig = layoutBasedChunkingConfig;
+            return this;
+        }
+
+        /**
+         * @param layoutBasedChunkingConfig Configuration for the layout based chunking.
+         * Structure is documented below.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder layoutBasedChunkingConfig(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs layoutBasedChunkingConfig) {
+            return layoutBasedChunkingConfig(Output.of(layoutBasedChunkingConfig));
+        }
+
+        public DataStoreDocumentProcessingConfigChunkingConfigArgs build() {
+            return $;
+        }
+    }
+
+}
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs.java
new file mode 100644
index 0000000000..7a28fdec57
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs.java
@@ -0,0 +1,129 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.discoveryengine.inputs;
+
+import com.pulumi.core.Output;
+import com.pulumi.core.annotations.Import;
+import java.lang.Boolean;
+import java.lang.Integer;
+import java.util.Objects;
+import java.util.Optional;
+import javax.annotation.Nullable;
+
+
+public final class DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs extends com.pulumi.resources.ResourceArgs {
+
+    public static final DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs Empty = new DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs();
+
+    /**
+     * The token size limit for each chunk.
+     * Supported values: 100-500 (inclusive). Default value: 500.
+     * 
+     */
+    @Import(name="chunkSize")
+    private @Nullable Output<Integer> chunkSize;
+
+    /**
+     * @return The token size limit for each chunk.
+     * Supported values: 100-500 (inclusive). Default value: 500.
+     * 
+     */
+    public Optional<Output<Integer>> chunkSize() {
+        return Optional.ofNullable(this.chunkSize);
+    }
+
+    /**
+     * Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss.
+     * Default value: False.
+     * 
+     */
+    @Import(name="includeAncestorHeadings")
+    private @Nullable Output<Boolean> includeAncestorHeadings;
+
+    /**
+     * @return Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss.
+     * Default value: False.
+     * 
+     */
+    public Optional<Output<Boolean>> includeAncestorHeadings() {
+        return Optional.ofNullable(this.includeAncestorHeadings);
+    }
+
+    private DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs() {}
+
+    private DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs $) {
+        this.chunkSize = $.chunkSize;
+        this.includeAncestorHeadings = $.includeAncestorHeadings;
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+    public static Builder builder(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs defaults) {
+        return new Builder(defaults);
+    }
+
+    public static final class Builder {
+        private DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs $;
+
+        public Builder() {
+            $ = new DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs();
+        }
+
+        public Builder(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs defaults) {
+            $ = new DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs(Objects.requireNonNull(defaults));
+        }
+
+        /**
+         * @param chunkSize The token size limit for each chunk.
+         * Supported values: 100-500 (inclusive). Default value: 500.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder chunkSize(@Nullable Output<Integer> chunkSize) {
+            $.chunkSize = chunkSize;
+            return this;
+        }
+
+        /**
+         * @param chunkSize The token size limit for each chunk.
+         * Supported values: 100-500 (inclusive). Default value: 500.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder chunkSize(Integer chunkSize) {
+            return chunkSize(Output.of(chunkSize));
+        }
+
+        /**
+         * @param includeAncestorHeadings Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss.
+         * Default value: False.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder includeAncestorHeadings(@Nullable Output<Boolean> includeAncestorHeadings) {
+            $.includeAncestorHeadings = includeAncestorHeadings;
+            return this;
+        }
+
+        /**
+         * @param includeAncestorHeadings Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss.
+         * Default value: False.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder includeAncestorHeadings(Boolean includeAncestorHeadings) {
+            return includeAncestorHeadings(Output.of(includeAncestorHeadings));
+        }
+
+        public DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs build() {
+            return $;
+        }
+    }
+
+}
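
For illustration, the new chunking surface composes as follows through the builders added above; the chunk size and heading flag are example values within the documented ranges:

    var docProcessing = DataStoreDocumentProcessingConfigArgs.builder()
        .chunkingConfig(DataStoreDocumentProcessingConfigChunkingConfigArgs.builder()
            .layoutBasedChunkingConfig(
                DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs.builder()
                    .chunkSize(400)                // allowed range 100-500, default 500
                    .includeAncestorHeadings(true) // default false
                    .build())
            .build())
        .build();
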
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.java
index 93d7a2e10e..9a6b732a5a 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.java
@@ -6,6 +6,7 @@
 import com.pulumi.core.Output;
 import com.pulumi.core.annotations.Import;
 import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgs;
+import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs;
 import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgs;
 import java.util.Objects;
 import java.util.Optional;
@@ -31,6 +32,21 @@
+    /**
+     * Configurations applied to layout parser.
+     * 
+     */
+    @Import(name="layoutParsingConfig")
+    private @Nullable Output<DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs> layoutParsingConfig;
+
+    /**
+     * @return Configurations applied to layout parser.
+     * 
+     */
+    public Optional<Output<DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs>> layoutParsingConfig() {
+        return Optional.ofNullable(this.layoutParsingConfig);
+    }
+
     /**
      * Configurations applied to OCR parser. Currently it only applies to PDFs.
      * Structure is documented below.
@@ -52,6 +68,7 @@ private DataStoreDocumentProcessingConfigDefaultParsingConfigArgs() {}
 
     private DataStoreDocumentProcessingConfigDefaultParsingConfigArgs(DataStoreDocumentProcessingConfigDefaultParsingConfigArgs $) {
         this.digitalParsingConfig = $.digitalParsingConfig;
+        this.layoutParsingConfig = $.layoutParsingConfig;
         this.ocrParsingConfig = $.ocrParsingConfig;
     }
 
@@ -94,6 +111,27 @@ public Builder digitalParsingConfig(DataStoreDocumentProcessingConfigDefaultPars
             return digitalParsingConfig(Output.of(digitalParsingConfig));
         }
 
+        /**
+         * @param layoutParsingConfig Configurations applied to layout parser.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder layoutParsingConfig(@Nullable Output<DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs> layoutParsingConfig) {
+            $.layoutParsingConfig = layoutParsingConfig;
+            return this;
+        }
+
+        /**
+         * @param layoutParsingConfig Configurations applied to layout parser.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder layoutParsingConfig(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs layoutParsingConfig) {
+            return layoutParsingConfig(Output.of(layoutParsingConfig));
+        }
+
         /**
          * @param ocrParsingConfig Configurations applied to OCR parser. Currently it only applies to PDFs.
          * Structure is documented below.
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.java
new file mode 100644
index 0000000000..e59acde6e2
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.java
@@ -0,0 +1,28 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.discoveryengine.inputs;
+
+
+
+
+public final class DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs extends com.pulumi.resources.ResourceArgs {
+
+    public static final DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs Empty = new DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs();
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static final class Builder {
+        private DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs $;
+
+        public Builder() {
+            $ = new DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs();
+        }
+        public DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs build() {
+            return $;
+        }
+    }
+
+}
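
For illustration, the layout parser is opted into as the default parser by attaching the (field-less) config object generated above:

    var defaultParsing = DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.builder()
        .layoutParsingConfig(
            DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.builder().build())
        .build();
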
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.java
index abaebe9448..7336c9c16c 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.java
@@ -7,6 +7,7 @@
 import com.pulumi.core.annotations.Import;
 import com.pulumi.exceptions.MissingRequiredPropertyException;
 import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigArgs;
+import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs;
 import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgs;
 import java.lang.String;
 import java.util.Objects;
@@ -48,6 +49,21 @@ public Output fileType() {
         return this.fileType;
     }
 
+    /**
+     * Configurations applied to layout parser.
+     * 
+     */
+    @Import(name="layoutParsingConfig")
+    private @Nullable Output<DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs> layoutParsingConfig;
+
+    /**
+     * @return Configurations applied to layout parser.
+     * 
+     */
+    public Optional<Output<DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs>> layoutParsingConfig() {
+        return Optional.ofNullable(this.layoutParsingConfig);
+    }
+
     /**
      * Configurations applied to OCR parser. Currently it only applies to PDFs.
      * Structure is documented below.
@@ -70,6 +86,7 @@ private DataStoreDocumentProcessingConfigParsingConfigOverrideArgs() {}
     private DataStoreDocumentProcessingConfigParsingConfigOverrideArgs(DataStoreDocumentProcessingConfigParsingConfigOverrideArgs $) {
         this.digitalParsingConfig = $.digitalParsingConfig;
         this.fileType = $.fileType;
+        this.layoutParsingConfig = $.layoutParsingConfig;
         this.ocrParsingConfig = $.ocrParsingConfig;
     }
 
@@ -133,6 +150,27 @@ public Builder fileType(String fileType) {
             return fileType(Output.of(fileType));
         }
 
+        /**
+         * @param layoutParsingConfig Configurations applied to layout parser.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder layoutParsingConfig(@Nullable Output<DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs> layoutParsingConfig) {
+            $.layoutParsingConfig = layoutParsingConfig;
+            return this;
+        }
+
+        /**
+         * @param layoutParsingConfig Configurations applied to layout parser.
+         * 
+         * @return builder
+         * 
+         */
+        public Builder layoutParsingConfig(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs layoutParsingConfig) {
+            return layoutParsingConfig(Output.of(layoutParsingConfig));
+        }
+
         /**
          * @param ocrParsingConfig Configurations applied to OCR parser. Currently it only applies to PDFs.
          * Structure is documented below.
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.java
new file mode 100644
index 0000000000..05f517342c
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.java
@@ -0,0 +1,28 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.discoveryengine.inputs;
+
+
+
+
+public final class DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs extends com.pulumi.resources.ResourceArgs {
+
+    public static final DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs Empty = new DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs();
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static final class Builder {
+        private DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs $;
+
+        public Builder() {
+            $ = new DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs();
+        }
+        public DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs build() {
+            return $;
+        }
+    }
+
+}
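
For illustration, the same applies to per-file-type overrides; the "pdf" file-type key is an assumption, not taken from this patch:

    var pdfOverride = DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.builder()
        .fileType("pdf") // assumed file-type key
        .layoutParsingConfig(
            DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.builder().build())
        .build();
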
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreState.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreState.java
index e06a431346..53cd222d1a 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreState.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/inputs/DataStoreState.java
@@ -139,7 +139,7 @@ public Optional> documentProcessin
 
     /**
      * The industry vertical that the data store registers.
-     * Possible values are: `GENERIC`, `MEDIA`.
+     * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
      * 
      */
     @Import(name="industryVertical")
@@ -147,7 +147,7 @@ public Optional> documentProcessin
 
     /**
      * @return The industry vertical that the data store registers.
-     * Possible values are: `GENERIC`, `MEDIA`.
+     * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
      * 
      */
     public Optional<Output<String>> industryVertical() {
@@ -238,7 +238,7 @@ public Optional> skipDefaultSchemaCreation() {
 
     /**
      * The solutions that the data store enrolls.
-     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
      * 
      */
     @Import(name="solutionTypes")
@@ -246,7 +246,7 @@ public Optional> skipDefaultSchemaCreation() {
 
     /**
      * @return The solutions that the data store enrolls.
-     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+     * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
      * 
      */
     public Optional<Output<List<String>>> solutionTypes() {
@@ -452,7 +452,7 @@ public Builder documentProcessingConfig(DataStoreDocumentProcessingConfigArgs do
 
         /**
          * @param industryVertical The industry vertical that the data store registers.
-         * Possible values are: `GENERIC`, `MEDIA`.
+         * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
          * 
          * @return builder
          * 
@@ -464,7 +464,7 @@ public Builder industryVertical(@Nullable Output industryVertical) {
 
         /**
          * @param industryVertical The industry vertical that the data store registers.
-         * Possible values are: `GENERIC`, `MEDIA`.
+         * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`.
          * 
          * @return builder
          * 
@@ -581,7 +581,7 @@ public Builder skipDefaultSchemaCreation(Boolean skipDefaultSchemaCreation) {
 
         /**
          * @param solutionTypes The solutions that the data store enrolls.
-         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
          * 
          * @return builder
          * 
@@ -593,7 +593,7 @@ public Builder solutionTypes(@Nullable Output> solutionTypes) {
 
         /**
          * @param solutionTypes The solutions that the data store enrolls.
-         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
          * 
          * @return builder
          * 
@@ -604,7 +604,7 @@ public Builder solutionTypes(List solutionTypes) {
 
         /**
          * @param solutionTypes The solutions that the data store enrolls.
-         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`.
+         * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`.
          * 
          * @return builder
          * 
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfig.java
index 1bae828359..b6ea88b70f 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfig.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfig.java
@@ -4,6 +4,7 @@
 package com.pulumi.gcp.discoveryengine.outputs;
 
 import com.pulumi.core.annotations.CustomType;
+import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigChunkingConfig;
 import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigDefaultParsingConfig;
 import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigParsingConfigOverride;
 import java.lang.String;
@@ -14,6 +15,12 @@
 
 @CustomType
 public final class DataStoreDocumentProcessingConfig {
+    /**
+     * @return Whether chunking mode is enabled.
+     * Structure is documented below.
+     * 
+     */
+    private @Nullable DataStoreDocumentProcessingConfigChunkingConfig chunkingConfig;
     /**
      * @return Configurations for default Document parser. If not specified, this resource
      * will be configured to use a default DigitalParsingConfig, and the default parsing
@@ -36,6 +43,14 @@ public final class DataStoreDocumentProcessingConfig {
     private @Nullable List<DataStoreDocumentProcessingConfigParsingConfigOverride> parsingConfigOverrides;
 
     private DataStoreDocumentProcessingConfig() {}
+    /**
+     * @return Whether chunking mode is enabled.
+     * Structure is documented below.
+     * 
+     */
+    public Optional<DataStoreDocumentProcessingConfigChunkingConfig> chunkingConfig() {
+        return Optional.ofNullable(this.chunkingConfig);
+    }
     /**
      * @return Configurations for default Document parser. If not specified, this resource
      * will be configured to use a default DigitalParsingConfig, and the default parsing
@@ -72,17 +87,25 @@ public static Builder builder(DataStoreDocumentProcessingConfig defaults) {
     }
     @CustomType.Builder
     public static final class Builder {
+        private @Nullable DataStoreDocumentProcessingConfigChunkingConfig chunkingConfig;
         private @Nullable DataStoreDocumentProcessingConfigDefaultParsingConfig defaultParsingConfig;
         private @Nullable String name;
         private @Nullable List<DataStoreDocumentProcessingConfigParsingConfigOverride> parsingConfigOverrides;
         public Builder() {}
         public Builder(DataStoreDocumentProcessingConfig defaults) {
     	      Objects.requireNonNull(defaults);
+    	      this.chunkingConfig = defaults.chunkingConfig;
     	      this.defaultParsingConfig = defaults.defaultParsingConfig;
     	      this.name = defaults.name;
     	      this.parsingConfigOverrides = defaults.parsingConfigOverrides;
         }
 
+        @CustomType.Setter
+        public Builder chunkingConfig(@Nullable DataStoreDocumentProcessingConfigChunkingConfig chunkingConfig) {
+
+            this.chunkingConfig = chunkingConfig;
+            return this;
+        }
         @CustomType.Setter
         public Builder defaultParsingConfig(@Nullable DataStoreDocumentProcessingConfigDefaultParsingConfig defaultParsingConfig) {
 
@@ -106,6 +129,7 @@ public Builder parsingConfigOverrides(DataStoreDocumentProcessingConfigParsingCo
         }
         public DataStoreDocumentProcessingConfig build() {
             final var _resultValue = new DataStoreDocumentProcessingConfig();
+            _resultValue.chunkingConfig = chunkingConfig;
             _resultValue.defaultParsingConfig = defaultParsingConfig;
             _resultValue.name = name;
             _resultValue.parsingConfigOverrides = parsingConfigOverrides;
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigChunkingConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigChunkingConfig.java
new file mode 100644
index 0000000000..e8fc4eb929
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigChunkingConfig.java
@@ -0,0 +1,59 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.discoveryengine.outputs;
+
+import com.pulumi.core.annotations.CustomType;
+import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig;
+import java.util.Objects;
+import java.util.Optional;
+import javax.annotation.Nullable;
+
+@CustomType
+public final class DataStoreDocumentProcessingConfigChunkingConfig {
+    /**
+     * @return Configuration for the layout based chunking.
+     * Structure is documented below.
+     * 
+     */
+    private @Nullable DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig layoutBasedChunkingConfig;
+
+    private DataStoreDocumentProcessingConfigChunkingConfig() {}
+    /**
+     * @return Configuration for the layout based chunking.
+     * Structure is documented below.
+     * 
+     */
+    public Optional<DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig> layoutBasedChunkingConfig() {
+        return Optional.ofNullable(this.layoutBasedChunkingConfig);
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static Builder builder(DataStoreDocumentProcessingConfigChunkingConfig defaults) {
+        return new Builder(defaults);
+    }
+    @CustomType.Builder
+    public static final class Builder {
+        private @Nullable DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig layoutBasedChunkingConfig;
+        public Builder() {}
+        public Builder(DataStoreDocumentProcessingConfigChunkingConfig defaults) {
+    	      Objects.requireNonNull(defaults);
+    	      this.layoutBasedChunkingConfig = defaults.layoutBasedChunkingConfig;
+        }
+
+        @CustomType.Setter
+        public Builder layoutBasedChunkingConfig(@Nullable DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig layoutBasedChunkingConfig) {
+
+            this.layoutBasedChunkingConfig = layoutBasedChunkingConfig;
+            return this;
+        }
+        public DataStoreDocumentProcessingConfigChunkingConfig build() {
+            final var _resultValue = new DataStoreDocumentProcessingConfigChunkingConfig();
+            _resultValue.layoutBasedChunkingConfig = layoutBasedChunkingConfig;
+            return _resultValue;
+        }
+    }
+}
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.java
new file mode 100644
index 0000000000..73aebce126
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.java
@@ -0,0 +1,83 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.discoveryengine.outputs;
+
+import com.pulumi.core.annotations.CustomType;
+import java.lang.Boolean;
+import java.lang.Integer;
+import java.util.Objects;
+import java.util.Optional;
+import javax.annotation.Nullable;
+
+@CustomType
+public final class DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig {
+    /**
+     * @return The token size limit for each chunk.
+     * Supported values: 100-500 (inclusive). Default value: 500.
+     * 
+     */
+    private @Nullable Integer chunkSize;
+    /**
+     * @return Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss.
+     * Default value: False.
+     * 
+     */
+    private @Nullable Boolean includeAncestorHeadings;
+
+    private DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig() {}
+    /**
+     * @return The token size limit for each chunk.
+     * Supported values: 100-500 (inclusive). Default value: 500.
+     * 
+     */
+    public Optional<Integer> chunkSize() {
+        return Optional.ofNullable(this.chunkSize);
+    }
+    /**
+     * @return Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss.
+     * Default value: False.
+     * 
+     */
+    public Optional<Boolean> includeAncestorHeadings() {
+        return Optional.ofNullable(this.includeAncestorHeadings);
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static Builder builder(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig defaults) {
+        return new Builder(defaults);
+    }
+    @CustomType.Builder
+    public static final class Builder {
+        private @Nullable Integer chunkSize;
+        private @Nullable Boolean includeAncestorHeadings;
+        public Builder() {}
+        public Builder(DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig defaults) {
+    	      Objects.requireNonNull(defaults);
+    	      this.chunkSize = defaults.chunkSize;
+    	      this.includeAncestorHeadings = defaults.includeAncestorHeadings;
+        }
+
+        @CustomType.Setter
+        public Builder chunkSize(@Nullable Integer chunkSize) {
+
+            this.chunkSize = chunkSize;
+            return this;
+        }
+        @CustomType.Setter
+        public Builder includeAncestorHeadings(@Nullable Boolean includeAncestorHeadings) {
+
+            this.includeAncestorHeadings = includeAncestorHeadings;
+            return this;
+        }
+        public DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig build() {
+            final var _resultValue = new DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig();
+            _resultValue.chunkSize = chunkSize;
+            _resultValue.includeAncestorHeadings = includeAncestorHeadings;
+            return _resultValue;
+        }
+    }
+}
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigDefaultParsingConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigDefaultParsingConfig.java
index 0e0da5efcd..b895863b86 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigDefaultParsingConfig.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigDefaultParsingConfig.java
@@ -5,6 +5,7 @@
 
 import com.pulumi.core.annotations.CustomType;
 import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig;
+import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig;
 import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig;
 import java.util.Objects;
 import java.util.Optional;
@@ -17,6 +18,11 @@ public final class DataStoreDocumentProcessingConfigDefaultParsingConfig {
      * 
      */
     private @Nullable DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig digitalParsingConfig;
+    /**
+     * @return Configurations applied to layout parser.
+     * 
+     */
+    private @Nullable DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig layoutParsingConfig;
     /**
      * @return Configurations applied to OCR parser. Currently it only applies to PDFs.
      * Structure is documented below.
@@ -32,6 +38,13 @@ private DataStoreDocumentProcessingConfigDefaultParsingConfig() {}
     public Optional<DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig> digitalParsingConfig() {
         return Optional.ofNullable(this.digitalParsingConfig);
     }
+    /**
+     * @return Configurations applied to layout parser.
+     * 
+     */
+    public Optional<DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig> layoutParsingConfig() {
+        return Optional.ofNullable(this.layoutParsingConfig);
+    }
     /**
      * @return Configurations applied to OCR parser. Currently it only applies to PDFs.
      * Structure is documented below.
@@ -51,11 +64,13 @@ public static Builder builder(DataStoreDocumentProcessingConfigDefaultParsingCon
     @CustomType.Builder
     public static final class Builder {
         private @Nullable DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig digitalParsingConfig;
+        private @Nullable DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig layoutParsingConfig;
         private @Nullable DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig ocrParsingConfig;
         public Builder() {}
         public Builder(DataStoreDocumentProcessingConfigDefaultParsingConfig defaults) {
     	      Objects.requireNonNull(defaults);
     	      this.digitalParsingConfig = defaults.digitalParsingConfig;
+    	      this.layoutParsingConfig = defaults.layoutParsingConfig;
     	      this.ocrParsingConfig = defaults.ocrParsingConfig;
         }
 
@@ -66,6 +81,12 @@ public Builder digitalParsingConfig(@Nullable DataStoreDocumentProcessingConfigD
             return this;
         }
         @CustomType.Setter
+        public Builder layoutParsingConfig(@Nullable DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig layoutParsingConfig) {
+
+            this.layoutParsingConfig = layoutParsingConfig;
+            return this;
+        }
+        @CustomType.Setter
         public Builder ocrParsingConfig(@Nullable DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig ocrParsingConfig) {
 
             this.ocrParsingConfig = ocrParsingConfig;
@@ -74,6 +95,7 @@ public Builder ocrParsingConfig(@Nullable DataStoreDocumentProcessingConfigDefau
         public DataStoreDocumentProcessingConfigDefaultParsingConfig build() {
             final var _resultValue = new DataStoreDocumentProcessingConfigDefaultParsingConfig();
             _resultValue.digitalParsingConfig = digitalParsingConfig;
+            _resultValue.layoutParsingConfig = layoutParsingConfig;
             _resultValue.ocrParsingConfig = ocrParsingConfig;
             return _resultValue;
         }
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig.java
new file mode 100644
index 0000000000..def64a2779
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig.java
@@ -0,0 +1,32 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.discoveryengine.outputs;
+
+import com.pulumi.core.annotations.CustomType;
+import java.util.Objects;
+
+@CustomType
+public final class DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig {
+    private DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig() {}
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static Builder builder(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig defaults) {
+        return new Builder(defaults);
+    }
+    @CustomType.Builder
+    public static final class Builder {
+        public Builder() {}
+        public Builder(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig defaults) {
+    	      Objects.requireNonNull(defaults);
+        }
+
+        public DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig build() {
+            final var _resultValue = new DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig();
+            return _resultValue;
+        }
+    }
+}
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigParsingConfigOverride.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigParsingConfigOverride.java
index a661822b1e..15e5bbb4dc 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigParsingConfigOverride.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigParsingConfigOverride.java
@@ -6,6 +6,7 @@
 import com.pulumi.core.annotations.CustomType;
 import com.pulumi.exceptions.MissingRequiredPropertyException;
 import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig;
+import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig;
 import com.pulumi.gcp.discoveryengine.outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig;
 import java.lang.String;
 import java.util.Objects;
@@ -24,6 +25,11 @@ public final class DataStoreDocumentProcessingConfigParsingConfigOverride {
      * 
      */
     private String fileType;
+    /**
+     * @return Configurations applied to layout parser.
+     * 
+     */
+    private @Nullable DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig layoutParsingConfig;
     /**
      * @return Configurations applied to OCR parser. Currently it only applies to PDFs.
      * Structure is documented below.
@@ -46,6 +52,13 @@
+    /**
+     * @return Configurations applied to layout parser.
+     * 
+     */
+    public Optional<DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig> layoutParsingConfig() {
+        return Optional.ofNullable(this.layoutParsingConfig);
+    }
     /**
      * @return Configurations applied to OCR parser. Currently it only applies to PDFs.
      * Structure is documented below.
@@ -66,12 +79,14 @@ public static Builder builder(DataStoreDocumentProcessingConfigParsingConfigOver
     public static final class Builder {
         private @Nullable DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig digitalParsingConfig;
         private String fileType;
+        private @Nullable DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig layoutParsingConfig;
         private @Nullable DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig ocrParsingConfig;
         public Builder() {}
         public Builder(DataStoreDocumentProcessingConfigParsingConfigOverride defaults) {
     	      Objects.requireNonNull(defaults);
     	      this.digitalParsingConfig = defaults.digitalParsingConfig;
     	      this.fileType = defaults.fileType;
+    	      this.layoutParsingConfig = defaults.layoutParsingConfig;
     	      this.ocrParsingConfig = defaults.ocrParsingConfig;
         }
 
@@ -90,6 +105,12 @@ public Builder fileType(String fileType) {
             return this;
         }
         @CustomType.Setter
+        public Builder layoutParsingConfig(@Nullable DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig layoutParsingConfig) {
+
+            this.layoutParsingConfig = layoutParsingConfig;
+            return this;
+        }
+        @CustomType.Setter
         public Builder ocrParsingConfig(@Nullable DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig ocrParsingConfig) {
 
             this.ocrParsingConfig = ocrParsingConfig;
@@ -99,6 +120,7 @@ public DataStoreDocumentProcessingConfigParsingConfigOverride build() {
             final var _resultValue = new DataStoreDocumentProcessingConfigParsingConfigOverride();
             _resultValue.digitalParsingConfig = digitalParsingConfig;
             _resultValue.fileType = fileType;
+            _resultValue.layoutParsingConfig = layoutParsingConfig;
             _resultValue.ocrParsingConfig = ocrParsingConfig;
             return _resultValue;
         }
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig.java
new file mode 100644
index 0000000000..1e04f9af6b
--- /dev/null
+++ b/sdk/java/src/main/java/com/pulumi/gcp/discoveryengine/outputs/DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig.java
@@ -0,0 +1,32 @@
+// *** WARNING: this file was generated by pulumi-java-gen. ***
+// *** Do not edit by hand unless you're certain you know what you are doing! ***
+
+package com.pulumi.gcp.discoveryengine.outputs;
+
+import com.pulumi.core.annotations.CustomType;
+import java.util.Objects;
+
+@CustomType
+public final class DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig {
+    private DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig() {}
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static Builder builder(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig defaults) {
+        return new Builder(defaults);
+    }
+    @CustomType.Builder
+    public static final class Builder {
+        public Builder() {}
+        public Builder(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig defaults) {
+    	      Objects.requireNonNull(defaults);
+        }
+
+        public DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig build() {
+            final var _resultValue = new DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig();
+            return _resultValue;
+        }
+    }
+}
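The two empty `LayoutParsingConfig` classes above only signal that the layout parser should be used; they carry no fields of their own. As a rough illustration of where they surface in a user program, here is a hedged Java sketch that is not part of the generated SDK in this patch. It assumes matching arg classes exist under `com.pulumi.gcp.discoveryengine.inputs` and that the `DataStore` resource accepts them through `documentProcessingConfig`; the IDs and required fields are purely illustrative.

```java
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.discoveryengine.DataStore;
import com.pulumi.gcp.discoveryengine.DataStoreArgs;
import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigArgs;
import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigDefaultParsingConfigArgs;
import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs;
import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigParsingConfigOverrideArgs;
import com.pulumi.gcp.discoveryengine.inputs.DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Layout parsing as the default parser, plus a per-file-type override for DOCX.
        // Resource IDs and required fields below are placeholders, not from the patch.
        var store = new DataStore("store", DataStoreArgs.builder()
            .location("global")
            .dataStoreId("layout-parsing-example")
            .displayName("layout-parsing-example")
            .industryVertical("GENERIC")
            .contentConfig("CONTENT_REQUIRED")
            .solutionTypes("SOLUTION_TYPE_SEARCH")
            .documentProcessingConfig(DataStoreDocumentProcessingConfigArgs.builder()
                .defaultParsingConfig(DataStoreDocumentProcessingConfigDefaultParsingConfigArgs.builder()
                    .layoutParsingConfig(DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs.builder()
                        .build())
                    .build())
                .parsingConfigOverrides(DataStoreDocumentProcessingConfigParsingConfigOverrideArgs.builder()
                    .fileType("docx")
                    .layoutParsingConfig(DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs.builder()
                        .build())
                    .build())
                .build())
            .build());
    }
}
```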
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/firebase/DatabaseInstance.java b/sdk/java/src/main/java/com/pulumi/gcp/firebase/DatabaseInstance.java
index 834b85d103..8b9299ccf5 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/firebase/DatabaseInstance.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/firebase/DatabaseInstance.java
@@ -205,14 +205,14 @@ public Output databaseUrl() {
         return this.databaseUrl;
     }
     /**
-     * The intended database state.
+     * The intended database state. Possible values: ACTIVE, DISABLED.
      * 
      */
     @Export(name="desiredState", refs={String.class}, tree="[0]")
     private Output</* @Nullable */ String> desiredState;
 
     /**
-     * @return The intended database state.
+     * @return The intended database state. Possible values: ACTIVE, DISABLED.
      * 
      */
     public Output<Optional<String>> desiredState() {
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/firebase/DatabaseInstanceArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/firebase/DatabaseInstanceArgs.java
index 0748690103..e3efffb9a9 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/firebase/DatabaseInstanceArgs.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/firebase/DatabaseInstanceArgs.java
@@ -17,14 +17,14 @@ public final class DatabaseInstanceArgs extends com.pulumi.resources.ResourceArg
     public static final DatabaseInstanceArgs Empty = new DatabaseInstanceArgs();
 
     /**
-     * The intended database state.
+     * The intended database state. Possible values: ACTIVE, DISABLED.
      * 
      */
     @Import(name="desiredState")
     private @Nullable Output<String> desiredState;
 
     /**
-     * @return The intended database state.
+     * @return The intended database state. Possible values: ACTIVE, DISABLED.
      * 
      */
     public Optional<Output<String>> desiredState() {
@@ -140,7 +140,7 @@ public Builder(DatabaseInstanceArgs defaults) {
         }
 
         /**
-         * @param desiredState The intended database state.
+         * @param desiredState The intended database state. Possible values: ACTIVE, DISABLED.
          * 
          * @return builder
          * 
@@ -151,7 +151,7 @@ public Builder desiredState(@Nullable Output desiredState) {
         }
 
         /**
-         * @param desiredState The intended database state.
+         * @param desiredState The intended database state. Possible values: ACTIVE, DISABLED.
          * 
          * @return builder
          * 
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/firebase/inputs/DatabaseInstanceState.java b/sdk/java/src/main/java/com/pulumi/gcp/firebase/inputs/DatabaseInstanceState.java
index ee128e2e9d..f489b82c01 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/firebase/inputs/DatabaseInstanceState.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/firebase/inputs/DatabaseInstanceState.java
@@ -33,14 +33,14 @@ public Optional> databaseUrl() {
     }
 
     /**
-     * The intended database state.
+     * The intended database state. Possible values: ACTIVE, DISABLED.
      * 
      */
     @Import(name="desiredState")
     private @Nullable Output<String> desiredState;
 
     /**
-     * @return The intended database state.
+     * @return The intended database state. Possible values: ACTIVE, DISABLED.
      * 
      */
     public Optional<Output<String>> desiredState() {
@@ -218,7 +218,7 @@ public Builder databaseUrl(String databaseUrl) {
         }
 
         /**
-         * @param desiredState The intended database state.
+         * @param desiredState The intended database state. Possible values: ACTIVE, DISABLED.
          * 
          * @return builder
          * 
@@ -229,7 +229,7 @@ public Builder desiredState(@Nullable Output desiredState) {
         }
 
         /**
-         * @param desiredState The intended database state.
+         * @param desiredState The intended database state. Possible values: ACTIVE, DISABLED.
          * 
          * @return builder
          * 
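The documentation change above enumerates the allowed `desiredState` values. A hedged Java sketch of toggling a Realtime Database instance between them follows; it is illustrative only, with placeholder project, region and instance id, and the remaining arguments mirroring the resource's usual required fields.

```java
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.firebase.DatabaseInstance;
import com.pulumi.gcp.firebase.DatabaseInstanceArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Provision a Realtime Database instance and take it offline by declaring
        // the documented DISABLED state; switch back to ACTIVE to re-enable it.
        var db = new DatabaseInstance("db", DatabaseInstanceArgs.builder()
            .project("my-project-id")            // placeholder project
            .region("us-central1")
            .instanceId("example-db-instance")   // placeholder instance id
            .type("USER_DATABASE")
            .desiredState("DISABLED")
            .build());
    }
}
```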
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/gkehub/FeatureMembership.java b/sdk/java/src/main/java/com/pulumi/gcp/gkehub/FeatureMembership.java
index 6d49fcc799..45a3ab7cef 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/gkehub/FeatureMembership.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/gkehub/FeatureMembership.java
@@ -84,8 +84,9 @@
  *             .feature(feature.name())
  *             .membership(membership.membershipId())
  *             .configmanagement(FeatureMembershipConfigmanagementArgs.builder()
- *                 .version("1.6.2")
+ *                 .version("1.19.0")
  *                 .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()
+ *                     .enabled(true)
  *                     .git(FeatureMembershipConfigmanagementConfigSyncGitArgs.builder()
  *                         .syncRepo("https://github.com/hashicorp/terraform")
  *                         .build())
@@ -160,8 +161,9 @@
  *             .feature(feature.name())
  *             .membership(membership.membershipId())
  *             .configmanagement(FeatureMembershipConfigmanagementArgs.builder()
- *                 .version("1.15.1")
+ *                 .version("1.19.0")
  *                 .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()
+ *                     .enabled(true)
  *                     .oci(FeatureMembershipConfigmanagementConfigSyncOciArgs.builder()
  *                         .syncRepo("us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest")
  *                         .policyDir("config-connector")
@@ -349,8 +351,9 @@
  *             .membership(membership.membershipId())
  *             .membershipLocation(membership.location())
  *             .configmanagement(FeatureMembershipConfigmanagementArgs.builder()
- *                 .version("1.6.2")
+ *                 .version("1.19.0")
  *                 .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()
+ *                     .enabled(true)
  *                     .git(FeatureMembershipConfigmanagementConfigSyncGitArgs.builder()
  *                         .syncRepo("https://github.com/hashicorp/terraform")
  *                         .build())
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/gkehub/inputs/FeatureMembershipConfigmanagementArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/gkehub/inputs/FeatureMembershipConfigmanagementArgs.java
index 25e2993753..bd084b5ec1 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/gkehub/inputs/FeatureMembershipConfigmanagementArgs.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/gkehub/inputs/FeatureMembershipConfigmanagementArgs.java
@@ -20,14 +20,18 @@ public final class FeatureMembershipConfigmanagementArgs extends com.pulumi.reso
     public static final FeatureMembershipConfigmanagementArgs Empty = new FeatureMembershipConfigmanagementArgs();
 
     /**
+     * (Optional, Deprecated)
      * Binauthz configuration for the cluster. Structure is documented below.
+     * This field will be ignored and should not be set.
      * 
      */
     @Import(name="binauthz")
     private @Nullable Output<FeatureMembershipConfigmanagementBinauthzArgs> binauthz;
 
     /**
-     * @return Binauthz configuration for the cluster. Structure is documented below.
+     * @return (Optional, Deprecated)
+     * Binauthz configuration for the cluster. Structure is documented below.
+     * This field will be ignored and should not be set.
      * 
      */
     public Optional<Output<FeatureMembershipConfigmanagementBinauthzArgs>> binauthz() {
@@ -51,6 +55,10 @@ public Optional> configS
 
     /**
      * Hierarchy Controller configuration for the cluster. Structure is documented below.
+     * Configuring Hierarchy Controller through the configmanagement feature is no longer recommended.
+     * Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead.
+     * Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller)
+     * to migrate from Hierarchy Controller to HNC.
      * 
      */
     @Import(name="hierarchyController")
@@ -58,6 +66,10 @@ public Optional> configS
 
     /**
      * @return Hierarchy Controller configuration for the cluster. Structure is documented below.
+     * Configuring Hierarchy Controller through the configmanagement feature is no longer recommended.
+     * Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead.
+     * Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller)
+     * to migrate from Hierarchy Controller to HNC.
      * 
      */
     public Optional<Output<FeatureMembershipConfigmanagementHierarchyControllerArgs>> hierarchyController() {
@@ -81,6 +93,8 @@ public Optional> management() {
 
     /**
      * Policy Controller configuration for the cluster. Structure is documented below.
+     * Configuring Policy Controller through the configmanagement feature is no longer recommended.
+     * Use the policycontroller feature instead.
      * 
      */
     @Import(name="policyController")
@@ -88,6 +102,8 @@ public Optional> management() {
 
     /**
      * @return Policy Controller configuration for the cluster. Structure is documented below.
+     * Configuring Policy Controller through the configmanagement feature is no longer recommended.
+     * Use the policycontroller feature instead.
      * 
      */
     public Optional<Output<FeatureMembershipConfigmanagementPolicyControllerArgs>> policyController() {
@@ -139,7 +155,9 @@ public Builder(FeatureMembershipConfigmanagementArgs defaults) {
         }
 
         /**
-         * @param binauthz Binauthz configuration for the cluster. Structure is documented below.
+         * @param binauthz (Optional, Deprecated)
+         * Binauthz configuration for the cluster. Structure is documented below.
+         * This field will be ignored and should not be set.
          * 
          * @return builder
          * 
@@ -150,7 +168,9 @@ public Builder binauthz(@Nullable Output binauthz() {
@@ -63,6 +73,10 @@ public Optional configSync() {
     }
     /**
      * @return Hierarchy Controller configuration for the cluster. Structure is documented below.
+     * Configuring Hierarchy Controller through the configmanagement feature is no longer recommended.
+     * Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead.
+     * Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller)
+     * to migrate from Hierarchy Controller to HNC.
      * 
      */
     public Optional<FeatureMembershipConfigmanagementHierarchyController> hierarchyController() {
@@ -77,6 +91,8 @@ public Optional management() {
     }
     /**
      * @return Policy Controller configuration for the cluster. Structure is documented below.
+     * Configuring Policy Controller through the configmanagement feature is no longer recommended.
+     * Use the policycontroller feature instead.
      * 
      */
     public Optional<FeatureMembershipConfigmanagementPolicyController> policyController() {
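Given the deprecation notes added above (binauthz is ignored, and Hierarchy Controller and Policy Controller are no longer recommended under `configmanagement`), the sketch below shows the now-preferred shape of the block: Config Sync only, explicitly enabled. It is a minimal, hedged example, not part of the patch; it reuses the classes from the in-tree example earlier in this file, with placeholder feature and membership names, and Policy Controller users are expected to move to the separate `policycontroller` feature as the docs state.

```java
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.gkehub.FeatureMembership;
import com.pulumi.gcp.gkehub.FeatureMembershipArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncArgs;
import com.pulumi.gcp.gkehub.inputs.FeatureMembershipConfigmanagementConfigSyncGitArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Keep configmanagement limited to Config Sync; the deprecated binauthz,
        // hierarchyController and policyController blocks are simply omitted.
        var member = new FeatureMembership("member", FeatureMembershipArgs.builder()
            .location("global")
            .feature("configmanagement")     // assumes the Feature resource already exists
            .membership("my-membership")     // placeholder membership id
            .configmanagement(FeatureMembershipConfigmanagementArgs.builder()
                .version("1.19.0")
                .configSync(FeatureMembershipConfigmanagementConfigSyncArgs.builder()
                    .enabled(true)
                    .git(FeatureMembershipConfigmanagementConfigSyncGitArgs.builder()
                        .syncRepo("https://github.com/hashicorp/terraform")
                        .build())
                    .build())
                .build())
            .build());
    }
}
```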
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/WorkloadIdentityPoolProvider.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/WorkloadIdentityPoolProvider.java
index 55819746cb..5e79253dd2 100644
--- a/sdk/java/src/main/java/com/pulumi/gcp/iam/WorkloadIdentityPoolProvider.java
+++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/WorkloadIdentityPoolProvider.java
@@ -13,6 +13,7 @@
 import com.pulumi.gcp.iam.outputs.WorkloadIdentityPoolProviderAws;
 import com.pulumi.gcp.iam.outputs.WorkloadIdentityPoolProviderOidc;
 import com.pulumi.gcp.iam.outputs.WorkloadIdentityPoolProviderSaml;
+import com.pulumi.gcp.iam.outputs.WorkloadIdentityPoolProviderX509;
 import java.lang.Boolean;
 import java.lang.String;
 import java.util.Map;
@@ -407,6 +408,120 @@
  * }
  * 
 * <!--End PulumiCodeChooser -->
+ * ### Iam Workload Identity Pool Provider X509 Basic
+ * 
+ * <!--Start PulumiCodeChooser -->
+ * <pre>
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.iam.WorkloadIdentityPool;
+ * import com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;
+ * import com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;
+ * import com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;
+ * import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509Args;
+ * import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreArgs;
+ * import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs;
+ * import com.pulumi.std.StdFunctions;
+ * import com.pulumi.std.inputs.FileArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var pool = new WorkloadIdentityPool("pool", WorkloadIdentityPoolArgs.builder()
+ *             .workloadIdentityPoolId("example-pool")
+ *             .build());
+ * 
+ *         var example = new WorkloadIdentityPoolProvider("example", WorkloadIdentityPoolProviderArgs.builder()
+ *             .workloadIdentityPoolId(pool.workloadIdentityPoolId())
+ *             .workloadIdentityPoolProviderId("example-prvdr")
+ *             .attributeMapping(Map.of("google.subject", "assertion.subject.dn.cn"))
+ *             .x509(WorkloadIdentityPoolProviderX509Args.builder()
+ *                 .trustStore(WorkloadIdentityPoolProviderX509TrustStoreArgs.builder()
+ *                     .trustAnchors(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.builder()
+ *                         .pemCertificate(StdFunctions.file(FileArgs.builder()
+ *                             .input("test-fixtures/trust_anchor.pem")
+ *                             .build()).result())
+ *                         .build())
+ *                     .build())
+ *                 .build())
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser -->
+ * ### Iam Workload Identity Pool Provider X509 Full
+ * 
+ * <!--Start PulumiCodeChooser -->
+ * <pre>
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.iam.WorkloadIdentityPool;
+ * import com.pulumi.gcp.iam.WorkloadIdentityPoolArgs;
+ * import com.pulumi.gcp.iam.WorkloadIdentityPoolProvider;
+ * import com.pulumi.gcp.iam.WorkloadIdentityPoolProviderArgs;
+ * import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509Args;
+ * import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreArgs;
+ * import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs;
+ * import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs;
+ * import com.pulumi.std.StdFunctions;
+ * import com.pulumi.std.inputs.FileArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var pool = new WorkloadIdentityPool("pool", WorkloadIdentityPoolArgs.builder()
+ *             .workloadIdentityPoolId("example-pool")
+ *             .build());
+ * 
+ *         var example = new WorkloadIdentityPoolProvider("example", WorkloadIdentityPoolProviderArgs.builder()
+ *             .workloadIdentityPoolId(pool.workloadIdentityPoolId())
+ *             .workloadIdentityPoolProviderId("example-prvdr")
+ *             .displayName("Name of provider")
+ *             .description("X.509 identity pool provider for automated test")
+ *             .disabled(true)
+ *             .attributeMapping(Map.of("google.subject", "assertion.subject.dn.cn"))
+ *             .x509(WorkloadIdentityPoolProviderX509Args.builder()
+ *                 .trustStore(WorkloadIdentityPoolProviderX509TrustStoreArgs.builder()
+ *                     .trustAnchors(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.builder()
+ *                         .pemCertificate(StdFunctions.file(FileArgs.builder()
+ *                             .input("test-fixtures/trust_anchor.pem")
+ *                             .build()).result())
+ *                         .build())
+ *                     .intermediateCas(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.builder()
+ *                         .pemCertificate(StdFunctions.file(FileArgs.builder()
+ *                             .input("test-fixtures/intermediate_ca.pem")
+ *                             .build()).result())
+ *                         .build())
+ *                     .build())
+ *                 .build())
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> * * ## Import * @@ -733,6 +848,24 @@ public Output workloadIdentityPoolId() { public Output workloadIdentityPoolProviderId() { return this.workloadIdentityPoolProviderId; } + /** + * An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + * + */ + @Export(name="x509", refs={WorkloadIdentityPoolProviderX509.class}, tree="[0]") + private Output x509; + + /** + * @return An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + * + */ + public Output> x509() { + return Codegen.optional(this.x509); + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/WorkloadIdentityPoolProviderArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/WorkloadIdentityPoolProviderArgs.java index c94c170cb3..e21dca2346 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/iam/WorkloadIdentityPoolProviderArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/WorkloadIdentityPoolProviderArgs.java @@ -9,6 +9,7 @@ import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderAwsArgs; import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderOidcArgs; import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderSamlArgs; +import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509Args; import java.lang.Boolean; import java.lang.String; import java.util.Map; @@ -288,6 +289,25 @@ public Output workloadIdentityPoolProviderId() { return this.workloadIdentityPoolProviderId; } + /** + * An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + * + */ + @Import(name="x509") + private @Nullable Output x509; + + /** + * @return An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + * + */ + public Optional> x509() { + return Optional.ofNullable(this.x509); + } + private WorkloadIdentityPoolProviderArgs() {} private WorkloadIdentityPoolProviderArgs(WorkloadIdentityPoolProviderArgs $) { @@ -302,6 +322,7 @@ private WorkloadIdentityPoolProviderArgs(WorkloadIdentityPoolProviderArgs $) { this.saml = $.saml; this.workloadIdentityPoolId = $.workloadIdentityPoolId; this.workloadIdentityPoolProviderId = $.workloadIdentityPoolProviderId; + this.x509 = $.x509; } public static Builder builder() { @@ -655,6 +676,31 @@ public Builder workloadIdentityPoolProviderId(String workloadIdentityPoolProvide return workloadIdentityPoolProviderId(Output.of(workloadIdentityPoolProviderId)); } + /** + * @param x509 An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + * + * @return builder + * + */ + public Builder x509(@Nullable Output x509) { + $.x509 = x509; + return this; + } + + /** + * @param x509 An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. 
+ * + * @return builder + * + */ + public Builder x509(WorkloadIdentityPoolProviderX509Args x509) { + return x509(Output.of(x509)); + } + public WorkloadIdentityPoolProviderArgs build() { if ($.workloadIdentityPoolId == null) { throw new MissingRequiredPropertyException("WorkloadIdentityPoolProviderArgs", "workloadIdentityPoolId"); diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderSamlArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderSamlArgs.java index 52f0613284..6acc4b0287 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderSamlArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderSamlArgs.java @@ -17,6 +17,8 @@ public final class WorkloadIdentityPoolProviderSamlArgs extends com.pulumi.resou /** * SAML Identity provider configuration metadata xml doc. * + * <a name="nested_x509"></a>The `x509` block supports: + * */ @Import(name="idpMetadataXml", required=true) private Output idpMetadataXml; @@ -24,6 +26,8 @@ public final class WorkloadIdentityPoolProviderSamlArgs extends com.pulumi.resou /** * @return SAML Identity provider configuration metadata xml doc. * + * <a name="nested_x509"></a>The `x509` block supports: + * */ public Output idpMetadataXml() { return this.idpMetadataXml; @@ -56,6 +60,8 @@ public Builder(WorkloadIdentityPoolProviderSamlArgs defaults) { /** * @param idpMetadataXml SAML Identity provider configuration metadata xml doc. * + * <a name="nested_x509"></a>The `x509` block supports: + * * @return builder * */ @@ -67,6 +73,8 @@ public Builder idpMetadataXml(Output idpMetadataXml) { /** * @param idpMetadataXml SAML Identity provider configuration metadata xml doc. * + * <a name="nested_x509"></a>The `x509` block supports: + * * @return builder * */ diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderState.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderState.java index bfb75bdc13..9495c5303b 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderState.java @@ -8,6 +8,7 @@ import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderAwsArgs; import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderOidcArgs; import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderSamlArgs; +import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509Args; import java.lang.Boolean; import java.lang.String; import java.util.Map; @@ -331,6 +332,25 @@ public Optional> workloadIdentityPoolProviderId() { return Optional.ofNullable(this.workloadIdentityPoolProviderId); } + /** + * An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + * + */ + @Import(name="x509") + private @Nullable Output x509; + + /** + * @return An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. 
+ * + */ + public Optional> x509() { + return Optional.ofNullable(this.x509); + } + private WorkloadIdentityPoolProviderState() {} private WorkloadIdentityPoolProviderState(WorkloadIdentityPoolProviderState $) { @@ -347,6 +367,7 @@ private WorkloadIdentityPoolProviderState(WorkloadIdentityPoolProviderState $) { this.state = $.state; this.workloadIdentityPoolId = $.workloadIdentityPoolId; this.workloadIdentityPoolProviderId = $.workloadIdentityPoolProviderId; + this.x509 = $.x509; } public static Builder builder() { @@ -756,6 +777,31 @@ public Builder workloadIdentityPoolProviderId(String workloadIdentityPoolProvide return workloadIdentityPoolProviderId(Output.of(workloadIdentityPoolProviderId)); } + /** + * @param x509 An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + * + * @return builder + * + */ + public Builder x509(@Nullable Output x509) { + $.x509 = x509; + return this; + } + + /** + * @param x509 An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + * + * @return builder + * + */ + public Builder x509(WorkloadIdentityPoolProviderX509Args x509) { + return x509(Output.of(x509)); + } + public WorkloadIdentityPoolProviderState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509Args.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509Args.java new file mode 100644 index 0000000000..abbca9a082 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509Args.java @@ -0,0 +1,101 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreArgs; +import java.util.Objects; + + +public final class WorkloadIdentityPoolProviderX509Args extends com.pulumi.resources.ResourceArgs { + + public static final WorkloadIdentityPoolProviderX509Args Empty = new WorkloadIdentityPoolProviderX509Args(); + + /** + * A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. + * + */ + @Import(name="trustStore", required=true) + private Output trustStore; + + /** + * @return A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. 
+ * + */ + public Output trustStore() { + return this.trustStore; + } + + private WorkloadIdentityPoolProviderX509Args() {} + + private WorkloadIdentityPoolProviderX509Args(WorkloadIdentityPoolProviderX509Args $) { + this.trustStore = $.trustStore; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(WorkloadIdentityPoolProviderX509Args defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private WorkloadIdentityPoolProviderX509Args $; + + public Builder() { + $ = new WorkloadIdentityPoolProviderX509Args(); + } + + public Builder(WorkloadIdentityPoolProviderX509Args defaults) { + $ = new WorkloadIdentityPoolProviderX509Args(Objects.requireNonNull(defaults)); + } + + /** + * @param trustStore A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. + * + * @return builder + * + */ + public Builder trustStore(Output trustStore) { + $.trustStore = trustStore; + return this; + } + + /** + * @param trustStore A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. + * + * @return builder + * + */ + public Builder trustStore(WorkloadIdentityPoolProviderX509TrustStoreArgs trustStore) { + return trustStore(Output.of(trustStore)); + } + + public WorkloadIdentityPoolProviderX509Args build() { + if ($.trustStore == null) { + throw new MissingRequiredPropertyException("WorkloadIdentityPoolProviderX509Args", "trustStore"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreArgs.java new file mode 100644 index 0000000000..a695029f86 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreArgs.java @@ -0,0 +1,176 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs; +import com.pulumi.gcp.iam.inputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class WorkloadIdentityPoolProviderX509TrustStoreArgs extends com.pulumi.resources.ResourceArgs { + + public static final WorkloadIdentityPoolProviderX509TrustStoreArgs Empty = new WorkloadIdentityPoolProviderX509TrustStoreArgs(); + + /** + * Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * Structure is documented below. 
+ * + */ + @Import(name="intermediateCas") + private @Nullable Output> intermediateCas; + + /** + * @return Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * Structure is documented below. + * + */ + public Optional>> intermediateCas() { + return Optional.ofNullable(this.intermediateCas); + } + + /** + * List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * Structure is documented below. + * + */ + @Import(name="trustAnchors", required=true) + private Output> trustAnchors; + + /** + * @return List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * Structure is documented below. + * + */ + public Output> trustAnchors() { + return this.trustAnchors; + } + + private WorkloadIdentityPoolProviderX509TrustStoreArgs() {} + + private WorkloadIdentityPoolProviderX509TrustStoreArgs(WorkloadIdentityPoolProviderX509TrustStoreArgs $) { + this.intermediateCas = $.intermediateCas; + this.trustAnchors = $.trustAnchors; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(WorkloadIdentityPoolProviderX509TrustStoreArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private WorkloadIdentityPoolProviderX509TrustStoreArgs $; + + public Builder() { + $ = new WorkloadIdentityPoolProviderX509TrustStoreArgs(); + } + + public Builder(WorkloadIdentityPoolProviderX509TrustStoreArgs defaults) { + $ = new WorkloadIdentityPoolProviderX509TrustStoreArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param intermediateCas Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * Structure is documented below. + * + * @return builder + * + */ + public Builder intermediateCas(@Nullable Output> intermediateCas) { + $.intermediateCas = intermediateCas; + return this; + } + + /** + * @param intermediateCas Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * Structure is documented below. + * + * @return builder + * + */ + public Builder intermediateCas(List intermediateCas) { + return intermediateCas(Output.of(intermediateCas)); + } + + /** + * @param intermediateCas Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * Structure is documented below. + * + * @return builder + * + */ + public Builder intermediateCas(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs... intermediateCas) { + return intermediateCas(List.of(intermediateCas)); + } + + /** + * @param trustAnchors List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * Structure is documented below. 
+ * + * @return builder + * + */ + public Builder trustAnchors(Output> trustAnchors) { + $.trustAnchors = trustAnchors; + return this; + } + + /** + * @param trustAnchors List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * Structure is documented below. + * + * @return builder + * + */ + public Builder trustAnchors(List trustAnchors) { + return trustAnchors(Output.of(trustAnchors)); + } + + /** + * @param trustAnchors List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * Structure is documented below. + * + * @return builder + * + */ + public Builder trustAnchors(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs... trustAnchors) { + return trustAnchors(List.of(trustAnchors)); + } + + public WorkloadIdentityPoolProviderX509TrustStoreArgs build() { + if ($.trustAnchors == null) { + throw new MissingRequiredPropertyException("WorkloadIdentityPoolProviderX509TrustStoreArgs", "trustAnchors"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.java new file mode 100644 index 0000000000..475e8385d2 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs.java @@ -0,0 +1,87 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs extends com.pulumi.resources.ResourceArgs { + + public static final WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs Empty = new WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs(); + + /** + * PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + */ + @Import(name="pemCertificate") + private @Nullable Output pemCertificate; + + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). 
+ * + */ + public Optional> pemCertificate() { + return Optional.ofNullable(this.pemCertificate); + } + + private WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs() {} + + private WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs $) { + this.pemCertificate = $.pemCertificate; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs $; + + public Builder() { + $ = new WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs(); + } + + public Builder(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs defaults) { + $ = new WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param pemCertificate PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + * @return builder + * + */ + public Builder pemCertificate(@Nullable Output pemCertificate) { + $.pemCertificate = pemCertificate; + return this; + } + + /** + * @param pemCertificate PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + * @return builder + * + */ + public Builder pemCertificate(String pemCertificate) { + return pemCertificate(Output.of(pemCertificate)); + } + + public WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.java new file mode 100644 index 0000000000..f8a3dcbb78 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/inputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs.java @@ -0,0 +1,87 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs extends com.pulumi.resources.ResourceArgs { + + public static final WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs Empty = new WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs(); + + /** + * PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + */ + @Import(name="pemCertificate") + private @Nullable Output pemCertificate; + + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). 
+ * + */ + public Optional> pemCertificate() { + return Optional.ofNullable(this.pemCertificate); + } + + private WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs() {} + + private WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs $) { + this.pemCertificate = $.pemCertificate; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs $; + + public Builder() { + $ = new WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs(); + } + + public Builder(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs defaults) { + $ = new WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param pemCertificate PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + * @return builder + * + */ + public Builder pemCertificate(@Nullable Output pemCertificate) { + $.pemCertificate = pemCertificate; + return this; + } + + /** + * @param pemCertificate PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + * @return builder + * + */ + public Builder pemCertificate(String pemCertificate) { + return pemCertificate(Output.of(pemCertificate)); + } + + public WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderResult.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderResult.java index e0df472ccf..133cfae154 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderResult.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderResult.java @@ -8,6 +8,7 @@ import com.pulumi.gcp.iam.outputs.GetWorkloadIdentityPoolProviderAw; import com.pulumi.gcp.iam.outputs.GetWorkloadIdentityPoolProviderOidc; import com.pulumi.gcp.iam.outputs.GetWorkloadIdentityPoolProviderSaml; +import com.pulumi.gcp.iam.outputs.GetWorkloadIdentityPoolProviderX509; import java.lang.Boolean; import java.lang.String; import java.util.List; @@ -36,6 +37,7 @@ public final class GetWorkloadIdentityPoolProviderResult { private String state; private String workloadIdentityPoolId; private String workloadIdentityPoolProviderId; + private List x509s; private GetWorkloadIdentityPoolProviderResult() {} public String attributeCondition() { @@ -84,6 +86,9 @@ public String workloadIdentityPoolId() { public String workloadIdentityPoolProviderId() { return this.workloadIdentityPoolProviderId; } + public List x509s() { + return this.x509s; + } public static Builder builder() { return new Builder(); @@ -108,6 +113,7 @@ public static final class Builder { private String state; private String workloadIdentityPoolId; private String workloadIdentityPoolProviderId; + private List x509s; public Builder() {} public Builder(GetWorkloadIdentityPoolProviderResult defaults) { Objects.requireNonNull(defaults); @@ -125,6 +131,7 @@ public Builder(GetWorkloadIdentityPoolProviderResult defaults) { this.state = defaults.state; this.workloadIdentityPoolId = defaults.workloadIdentityPoolId; 
this.workloadIdentityPoolProviderId = defaults.workloadIdentityPoolProviderId; + this.x509s = defaults.x509s; } @CustomType.Setter @@ -246,6 +253,17 @@ public Builder workloadIdentityPoolProviderId(String workloadIdentityPoolProvide this.workloadIdentityPoolProviderId = workloadIdentityPoolProviderId; return this; } + @CustomType.Setter + public Builder x509s(List x509s) { + if (x509s == null) { + throw new MissingRequiredPropertyException("GetWorkloadIdentityPoolProviderResult", "x509s"); + } + this.x509s = x509s; + return this; + } + public Builder x509s(GetWorkloadIdentityPoolProviderX509... x509s) { + return x509s(List.of(x509s)); + } public GetWorkloadIdentityPoolProviderResult build() { final var _resultValue = new GetWorkloadIdentityPoolProviderResult(); _resultValue.attributeCondition = attributeCondition; @@ -262,6 +280,7 @@ public GetWorkloadIdentityPoolProviderResult build() { _resultValue.state = state; _resultValue.workloadIdentityPoolId = workloadIdentityPoolId; _resultValue.workloadIdentityPoolProviderId = workloadIdentityPoolProviderId; + _resultValue.x509s = x509s; return _resultValue; } } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509.java new file mode 100644 index 0000000000..5062ac1474 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509.java @@ -0,0 +1,70 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.iam.outputs.GetWorkloadIdentityPoolProviderX509TrustStore; +import java.util.List; +import java.util.Objects; + +@CustomType +public final class GetWorkloadIdentityPoolProviderX509 { + /** + * @return A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. + * + */ + private List trustStores; + + private GetWorkloadIdentityPoolProviderX509() {} + /** + * @return A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. 
+ * + */ + public List trustStores() { + return this.trustStores; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetWorkloadIdentityPoolProviderX509 defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private List trustStores; + public Builder() {} + public Builder(GetWorkloadIdentityPoolProviderX509 defaults) { + Objects.requireNonNull(defaults); + this.trustStores = defaults.trustStores; + } + + @CustomType.Setter + public Builder trustStores(List trustStores) { + if (trustStores == null) { + throw new MissingRequiredPropertyException("GetWorkloadIdentityPoolProviderX509", "trustStores"); + } + this.trustStores = trustStores; + return this; + } + public Builder trustStores(GetWorkloadIdentityPoolProviderX509TrustStore... trustStores) { + return trustStores(List.of(trustStores)); + } + public GetWorkloadIdentityPoolProviderX509 build() { + final var _resultValue = new GetWorkloadIdentityPoolProviderX509(); + _resultValue.trustStores = trustStores; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStore.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStore.java new file mode 100644 index 0000000000..2a9f2d645b --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStore.java @@ -0,0 +1,97 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.iam.outputs.GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa; +import com.pulumi.gcp.iam.outputs.GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor; +import java.util.List; +import java.util.Objects; + +@CustomType +public final class GetWorkloadIdentityPoolProviderX509TrustStore { + /** + * @return Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * + */ + private List intermediateCas; + /** + * @return List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * + */ + private List trustAnchors; + + private GetWorkloadIdentityPoolProviderX509TrustStore() {} + /** + * @return Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * + */ + public List intermediateCas() { + return this.intermediateCas; + } + /** + * @return List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. 
+ * + */ + public List trustAnchors() { + return this.trustAnchors; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetWorkloadIdentityPoolProviderX509TrustStore defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private List intermediateCas; + private List trustAnchors; + public Builder() {} + public Builder(GetWorkloadIdentityPoolProviderX509TrustStore defaults) { + Objects.requireNonNull(defaults); + this.intermediateCas = defaults.intermediateCas; + this.trustAnchors = defaults.trustAnchors; + } + + @CustomType.Setter + public Builder intermediateCas(List intermediateCas) { + if (intermediateCas == null) { + throw new MissingRequiredPropertyException("GetWorkloadIdentityPoolProviderX509TrustStore", "intermediateCas"); + } + this.intermediateCas = intermediateCas; + return this; + } + public Builder intermediateCas(GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa... intermediateCas) { + return intermediateCas(List.of(intermediateCas)); + } + @CustomType.Setter + public Builder trustAnchors(List trustAnchors) { + if (trustAnchors == null) { + throw new MissingRequiredPropertyException("GetWorkloadIdentityPoolProviderX509TrustStore", "trustAnchors"); + } + this.trustAnchors = trustAnchors; + return this; + } + public Builder trustAnchors(GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor... trustAnchors) { + return trustAnchors(List.of(trustAnchors)); + } + public GetWorkloadIdentityPoolProviderX509TrustStore build() { + final var _resultValue = new GetWorkloadIdentityPoolProviderX509TrustStore(); + _resultValue.intermediateCas = intermediateCas; + _resultValue.trustAnchors = trustAnchors; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.java new file mode 100644 index 0000000000..3ab3e8b9fd --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.java @@ -0,0 +1,60 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + */ + private String pemCertificate; + + private GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa() {} + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). 
+ * + */ + public String pemCertificate() { + return this.pemCertificate; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String pemCertificate; + public Builder() {} + public Builder(GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa defaults) { + Objects.requireNonNull(defaults); + this.pemCertificate = defaults.pemCertificate; + } + + @CustomType.Setter + public Builder pemCertificate(String pemCertificate) { + if (pemCertificate == null) { + throw new MissingRequiredPropertyException("GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa", "pemCertificate"); + } + this.pemCertificate = pemCertificate; + return this; + } + public GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa build() { + final var _resultValue = new GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa(); + _resultValue.pemCertificate = pemCertificate; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.java new file mode 100644 index 0000000000..2e90f4d7a2 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.java @@ -0,0 +1,60 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + */ + private String pemCertificate; + + private GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor() {} + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). 
+ * + */ + public String pemCertificate() { + return this.pemCertificate; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String pemCertificate; + public Builder() {} + public Builder(GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor defaults) { + Objects.requireNonNull(defaults); + this.pemCertificate = defaults.pemCertificate; + } + + @CustomType.Setter + public Builder pemCertificate(String pemCertificate) { + if (pemCertificate == null) { + throw new MissingRequiredPropertyException("GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor", "pemCertificate"); + } + this.pemCertificate = pemCertificate; + return this; + } + public GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor build() { + final var _resultValue = new GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor(); + _resultValue.pemCertificate = pemCertificate; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderSaml.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderSaml.java index 449415eca7..0a15156efd 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderSaml.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderSaml.java @@ -13,6 +13,8 @@ public final class WorkloadIdentityPoolProviderSaml { /** * @return SAML Identity provider configuration metadata xml doc. * + * <a name="nested_x509"></a>The `x509` block supports: + * */ private String idpMetadataXml; @@ -20,6 +22,8 @@ private WorkloadIdentityPoolProviderSaml() {} /** * @return SAML Identity provider configuration metadata xml doc. * + * <a name="nested_x509"></a>The `x509` block supports: + * */ public String idpMetadataXml() { return this.idpMetadataXml; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509.java new file mode 100644 index 0000000000..1964bbd526 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509.java @@ -0,0 +1,66 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.iam.outputs.WorkloadIdentityPoolProviderX509TrustStore; +import java.util.Objects; + +@CustomType +public final class WorkloadIdentityPoolProviderX509 { + /** + * @return A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. + * + */ + private WorkloadIdentityPoolProviderX509TrustStore trustStore; + + private WorkloadIdentityPoolProviderX509() {} + /** + * @return A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. 
Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. + * + */ + public WorkloadIdentityPoolProviderX509TrustStore trustStore() { + return this.trustStore; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(WorkloadIdentityPoolProviderX509 defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private WorkloadIdentityPoolProviderX509TrustStore trustStore; + public Builder() {} + public Builder(WorkloadIdentityPoolProviderX509 defaults) { + Objects.requireNonNull(defaults); + this.trustStore = defaults.trustStore; + } + + @CustomType.Setter + public Builder trustStore(WorkloadIdentityPoolProviderX509TrustStore trustStore) { + if (trustStore == null) { + throw new MissingRequiredPropertyException("WorkloadIdentityPoolProviderX509", "trustStore"); + } + this.trustStore = trustStore; + return this; + } + public WorkloadIdentityPoolProviderX509 build() { + final var _resultValue = new WorkloadIdentityPoolProviderX509(); + _resultValue.trustStore = trustStore; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStore.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStore.java new file mode 100644 index 0000000000..b8299fb95e --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStore.java @@ -0,0 +1,100 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.iam.outputs.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa; +import com.pulumi.gcp.iam.outputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor; +import java.util.List; +import java.util.Objects; +import javax.annotation.Nullable; + +@CustomType +public final class WorkloadIdentityPoolProviderX509TrustStore { + /** + * @return Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * Structure is documented below. + * + */ + private @Nullable List intermediateCas; + /** + * @return List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * Structure is documented below. + * + */ + private List trustAnchors; + + private WorkloadIdentityPoolProviderX509TrustStore() {} + /** + * @return Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * Structure is documented below. + * + */ + public List intermediateCas() { + return this.intermediateCas == null ? List.of() : this.intermediateCas; + } + /** + * @return List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * Structure is documented below. 
+ * + */ + public List trustAnchors() { + return this.trustAnchors; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(WorkloadIdentityPoolProviderX509TrustStore defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable List intermediateCas; + private List trustAnchors; + public Builder() {} + public Builder(WorkloadIdentityPoolProviderX509TrustStore defaults) { + Objects.requireNonNull(defaults); + this.intermediateCas = defaults.intermediateCas; + this.trustAnchors = defaults.trustAnchors; + } + + @CustomType.Setter + public Builder intermediateCas(@Nullable List intermediateCas) { + + this.intermediateCas = intermediateCas; + return this; + } + public Builder intermediateCas(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa... intermediateCas) { + return intermediateCas(List.of(intermediateCas)); + } + @CustomType.Setter + public Builder trustAnchors(List trustAnchors) { + if (trustAnchors == null) { + throw new MissingRequiredPropertyException("WorkloadIdentityPoolProviderX509TrustStore", "trustAnchors"); + } + this.trustAnchors = trustAnchors; + return this; + } + public Builder trustAnchors(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor... trustAnchors) { + return trustAnchors(List.of(trustAnchors)); + } + public WorkloadIdentityPoolProviderX509TrustStore build() { + final var _resultValue = new WorkloadIdentityPoolProviderX509TrustStore(); + _resultValue.intermediateCas = intermediateCas; + _resultValue.trustAnchors = trustAnchors; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.java new file mode 100644 index 0000000000..b8ae80662e --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.java @@ -0,0 +1,59 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + */ + private @Nullable String pemCertificate; + + private WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa() {} + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). 
+ * + */ + public Optional pemCertificate() { + return Optional.ofNullable(this.pemCertificate); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String pemCertificate; + public Builder() {} + public Builder(WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa defaults) { + Objects.requireNonNull(defaults); + this.pemCertificate = defaults.pemCertificate; + } + + @CustomType.Setter + public Builder pemCertificate(@Nullable String pemCertificate) { + + this.pemCertificate = pemCertificate; + return this; + } + public WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa build() { + final var _resultValue = new WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa(); + _resultValue.pemCertificate = pemCertificate; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.java b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.java new file mode 100644 index 0000000000..be0a230bd4 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/iam/outputs/WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.java @@ -0,0 +1,59 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.iam.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + * + */ + private @Nullable String pemCertificate; + + private WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor() {} + /** + * @return PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). 
+ * + */ + public Optional pemCertificate() { + return Optional.ofNullable(this.pemCertificate); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String pemCertificate; + public Builder() {} + public Builder(WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor defaults) { + Objects.requireNonNull(defaults); + this.pemCertificate = defaults.pemCertificate; + } + + @CustomType.Setter + public Builder pemCertificate(@Nullable String pemCertificate) { + + this.pemCertificate = pemCertificate; + return this; + } + public WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor build() { + final var _resultValue = new WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor(); + _resultValue.pemCertificate = pemCertificate; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/AutokeyConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/AutokeyConfig.java index 4ec0fb3ad0..8437c3e8cb 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/kms/AutokeyConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/AutokeyConfig.java @@ -122,12 +122,20 @@ * .build()); * * var example_autokeyconfig = new AutokeyConfig("example-autokeyconfig", AutokeyConfigArgs.builder() - * .folder(autokmsFolder.folderId()) + * .folder(autokmsFolder.id()) * .keyProject(keyProject.projectId().applyValue(projectId -> String.format("projects/%s", projectId))) * .build(), CustomResourceOptions.builder() * .dependsOn(waitSrvAccPermissions) * .build()); * + * // Wait delay after setting AutokeyConfig, to prevent diffs on reapply, + * // because setting the config takes a little to fully propagate. 
+ * var waitAutokeyPropagation = new Sleep("waitAutokeyPropagation", SleepArgs.builder() + * .createDuration("30s") + * .build(), CustomResourceOptions.builder() + * .dependsOn(example_autokeyconfig) + * .build()); + * * }}{@code * }}{@code * } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/KmsFunctions.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/KmsFunctions.java index e687d006ce..8acfabb855 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/kms/KmsFunctions.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/KmsFunctions.java @@ -10,6 +10,10 @@ import com.pulumi.gcp.Utilities; import com.pulumi.gcp.kms.inputs.GetCryptoKeyIamPolicyArgs; import com.pulumi.gcp.kms.inputs.GetCryptoKeyIamPolicyPlainArgs; +import com.pulumi.gcp.kms.inputs.GetCryptoKeyLatestVersionArgs; +import com.pulumi.gcp.kms.inputs.GetCryptoKeyLatestVersionPlainArgs; +import com.pulumi.gcp.kms.inputs.GetCryptoKeyVersionsArgs; +import com.pulumi.gcp.kms.inputs.GetCryptoKeyVersionsPlainArgs; import com.pulumi.gcp.kms.inputs.GetCryptoKeysArgs; import com.pulumi.gcp.kms.inputs.GetCryptoKeysPlainArgs; import com.pulumi.gcp.kms.inputs.GetEkmConnectionIamPolicyArgs; @@ -31,6 +35,8 @@ import com.pulumi.gcp.kms.inputs.GetKeyRingsArgs; import com.pulumi.gcp.kms.inputs.GetKeyRingsPlainArgs; import com.pulumi.gcp.kms.outputs.GetCryptoKeyIamPolicyResult; +import com.pulumi.gcp.kms.outputs.GetCryptoKeyLatestVersionResult; +import com.pulumi.gcp.kms.outputs.GetCryptoKeyVersionsResult; import com.pulumi.gcp.kms.outputs.GetCryptoKeysResult; import com.pulumi.gcp.kms.outputs.GetEkmConnectionIamPolicyResult; import com.pulumi.gcp.kms.outputs.GetKMSCryptoKeyResult; @@ -212,6 +218,462 @@ public static Output getCryptoKeyIamPolicy(GetCrypt public static CompletableFuture getCryptoKeyIamPolicyPlain(GetCryptoKeyIamPolicyPlainArgs args, InvokeOptions options) { return Deployment.getInstance().invokeAsync("gcp:kms/getCryptoKeyIamPolicy:getCryptoKeyIamPolicy", TypeShape.of(GetCryptoKeyIamPolicyResult.class), args, Utilities.withVersion(options)); } + /** + * Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.kms.KmsFunctions;
+     * import com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;
+     * import com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;
+     * import com.pulumi.gcp.kms.inputs.GetCryptoKeyLatestVersionArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()
+     *             .name("my-key-ring")
+     *             .location("us-central1")
+     *             .build());
+     * 
+     *         final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()
+     *             .name("my-crypto-key")
+     *             .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -> getKMSKeyRingResult.id()))
+     *             .build());
+     * 
+     *         final var myCryptoKeyLatestVersion = KmsFunctions.getCryptoKeyLatestVersion(GetCryptoKeyLatestVersionArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .build());
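+     * 
+     *         // Illustrative sketch, not part of the generated example: the optional
+     *         // `filter` argument narrows which CryptoKeyVersion is returned, e.g. only
+     *         // versions in the ENABLED state, as documented on GetCryptoKeyLatestVersionArgs.
+     *         // The variable name below is assumed for illustration.
+     *         final var myEnabledCryptoKeyLatestVersion = KmsFunctions.getCryptoKeyLatestVersion(GetCryptoKeyLatestVersionArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .filter("state:ENABLED")
+     *             .build());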
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getCryptoKeyLatestVersion(GetCryptoKeyLatestVersionArgs args) { + return getCryptoKeyLatestVersion(args, InvokeOptions.Empty); + } + /** + * Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.kms.KmsFunctions;
+     * import com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;
+     * import com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;
+     * import com.pulumi.gcp.kms.inputs.GetCryptoKeyLatestVersionArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()
+     *             .name("my-key-ring")
+     *             .location("us-central1")
+     *             .build());
+     * 
+     *         final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()
+     *             .name("my-crypto-key")
+     *             .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -> getKMSKeyRingResult.id()))
+     *             .build());
+     * 
+     *         final var myCryptoKeyLatestVersion = KmsFunctions.getCryptoKeyLatestVersion(GetCryptoKeyLatestVersionArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .build());
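+     * 
+     *         // Illustrative sketch, not part of the generated example: fields of the
+     *         // result, such as the version state documented on GetCryptoKeyLatestVersionResult,
+     *         // can be exported; the export name below is assumed for illustration.
+     *         ctx.export("latestVersionState", myCryptoKeyLatestVersion.applyValue(getCryptoKeyLatestVersionResult -> getCryptoKeyLatestVersionResult.state()));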
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getCryptoKeyLatestVersionPlain(GetCryptoKeyLatestVersionPlainArgs args) { + return getCryptoKeyLatestVersionPlain(args, InvokeOptions.Empty); + } + /** + * Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.kms.KmsFunctions;
+     * import com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;
+     * import com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;
+     * import com.pulumi.gcp.kms.inputs.GetCryptoKeyLatestVersionArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()
+     *             .name("my-key-ring")
+     *             .location("us-central1")
+     *             .build());
+     * 
+     *         final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()
+     *             .name("my-crypto-key")
+     *             .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -> getKMSKeyRingResult.id()))
+     *             .build());
+     * 
+     *         final var myCryptoKeyLatestVersion = KmsFunctions.getCryptoKeyLatestVersion(GetCryptoKeyLatestVersionArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getCryptoKeyLatestVersion(GetCryptoKeyLatestVersionArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion", TypeShape.of(GetCryptoKeyLatestVersionResult.class), args, Utilities.withVersion(options)); + } + /** + * Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.kms.KmsFunctions;
+     * import com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;
+     * import com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;
+     * import com.pulumi.gcp.kms.inputs.GetCryptoKeyLatestVersionArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()
+     *             .name("my-key-ring")
+     *             .location("us-central1")
+     *             .build());
+     * 
+     *         final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()
+     *             .name("my-crypto-key")
+     *             .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -> getKMSKeyRingResult.id()))
+     *             .build());
+     * 
+     *         final var myCryptoKeyLatestVersion = KmsFunctions.getCryptoKeyLatestVersion(GetCryptoKeyLatestVersionArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getCryptoKeyLatestVersionPlain(GetCryptoKeyLatestVersionPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion", TypeShape.of(GetCryptoKeyLatestVersionResult.class), args, Utilities.withVersion(options)); + } + /** + * Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.kms.KmsFunctions;
+     * import com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;
+     * import com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;
+     * import com.pulumi.gcp.kms.inputs.GetCryptoKeyVersionsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()
+     *             .name("my-key-ring")
+     *             .location("us-central1")
+     *             .build());
+     * 
+     *         final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()
+     *             .name("my-crypto-key")
+     *             .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -> getKMSKeyRingResult.id()))
+     *             .build());
+     * 
+     *         final var myCryptoKeyVersions = KmsFunctions.getCryptoKeyVersions(GetCryptoKeyVersionsArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .build());
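+     * 
+     *         // Illustrative sketch, not part of the generated example: the optional
+     *         // `filter` argument limits which versions are retrieved, e.g. by name prefix,
+     *         // as documented on GetCryptoKeyVersionsArgs. The variable name below is
+     *         // assumed for illustration.
+     *         final var myFilteredCryptoKeyVersions = KmsFunctions.getCryptoKeyVersions(GetCryptoKeyVersionsArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .filter("name:my-key-")
+     *             .build());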
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getCryptoKeyVersions(GetCryptoKeyVersionsArgs args) { + return getCryptoKeyVersions(args, InvokeOptions.Empty); + } + /** + * Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.kms.KmsFunctions;
+     * import com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;
+     * import com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;
+     * import com.pulumi.gcp.kms.inputs.GetCryptoKeyVersionsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()
+     *             .name("my-key-ring")
+     *             .location("us-central1")
+     *             .build());
+     * 
+     *         final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()
+     *             .name("my-crypto-key")
+     *             .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -> getKMSKeyRingResult.id()))
+     *             .build());
+     * 
+     *         final var myCryptoKeyVersions = KmsFunctions.getCryptoKeyVersions(GetCryptoKeyVersionsArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getCryptoKeyVersionsPlain(GetCryptoKeyVersionsPlainArgs args) { + return getCryptoKeyVersionsPlain(args, InvokeOptions.Empty); + } + /** + * Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.kms.KmsFunctions;
+     * import com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;
+     * import com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;
+     * import com.pulumi.gcp.kms.inputs.GetCryptoKeyVersionsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()
+     *             .name("my-key-ring")
+     *             .location("us-central1")
+     *             .build());
+     * 
+     *         final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()
+     *             .name("my-crypto-key")
+     *             .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -> getKMSKeyRingResult.id()))
+     *             .build());
+     * 
+     *         final var myCryptoKeyVersions = KmsFunctions.getCryptoKeyVersions(GetCryptoKeyVersionsArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static Output getCryptoKeyVersions(GetCryptoKeyVersionsArgs args, InvokeOptions options) { + return Deployment.getInstance().invoke("gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions", TypeShape.of(GetCryptoKeyVersionsResult.class), args, Utilities.withVersion(options)); + } + /** + * Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+     * {@code
+     * package generated_program;
+     * 
+     * import com.pulumi.Context;
+     * import com.pulumi.Pulumi;
+     * import com.pulumi.core.Output;
+     * import com.pulumi.gcp.kms.KmsFunctions;
+     * import com.pulumi.gcp.kms.inputs.GetKMSKeyRingArgs;
+     * import com.pulumi.gcp.kms.inputs.GetKMSCryptoKeyArgs;
+     * import com.pulumi.gcp.kms.inputs.GetCryptoKeyVersionsArgs;
+     * import java.util.List;
+     * import java.util.ArrayList;
+     * import java.util.Map;
+     * import java.io.File;
+     * import java.nio.file.Files;
+     * import java.nio.file.Paths;
+     * 
+     * public class App {
+     *     public static void main(String[] args) {
+     *         Pulumi.run(App::stack);
+     *     }
+     * 
+     *     public static void stack(Context ctx) {
+     *         final var myKeyRing = KmsFunctions.getKMSKeyRing(GetKMSKeyRingArgs.builder()
+     *             .name("my-key-ring")
+     *             .location("us-central1")
+     *             .build());
+     * 
+     *         final var myCryptoKey = KmsFunctions.getKMSCryptoKey(GetKMSCryptoKeyArgs.builder()
+     *             .name("my-crypto-key")
+     *             .keyRing(myKeyRing.applyValue(getKMSKeyRingResult -> getKMSKeyRingResult.id()))
+     *             .build());
+     * 
+     *         final var myCryptoKeyVersions = KmsFunctions.getCryptoKeyVersions(GetCryptoKeyVersionsArgs.builder()
+     *             .cryptoKey(myCryptoKey.applyValue(getKMSCryptoKeyResult -> getKMSCryptoKeyResult.id()))
+     *             .build());
+     * 
+     *     }
+     * }
+     * }
+     * 
+ * <!--End PulumiCodeChooser --> + * + */ + public static CompletableFuture getCryptoKeyVersionsPlain(GetCryptoKeyVersionsPlainArgs args, InvokeOptions options) { + return Deployment.getInstance().invokeAsync("gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions", TypeShape.of(GetCryptoKeyVersionsResult.class), args, Utilities.withVersion(options)); + } /** * Provides access to all Google Cloud Platform KMS CryptoKeys in a given KeyRing. For more information see * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key) diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyLatestVersionArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyLatestVersionArgs.java new file mode 100644 index 0000000000..068cb0d70d --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyLatestVersionArgs.java @@ -0,0 +1,152 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.kms.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCryptoKeyLatestVersionArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetCryptoKeyLatestVersionArgs Empty = new GetCryptoKeyLatestVersionArgs(); + + /** + * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + */ + @Import(name="cryptoKey", required=true) + private Output cryptoKey; + + /** + * @return The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + */ + public Output cryptoKey() { + return this.cryptoKey; + } + + /** + * The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on state. + * + * * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + */ + @Import(name="filter") + private @Nullable Output filter; + + /** + * @return The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on state. + * + * * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". 
+ * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + */ + public Optional> filter() { + return Optional.ofNullable(this.filter); + } + + private GetCryptoKeyLatestVersionArgs() {} + + private GetCryptoKeyLatestVersionArgs(GetCryptoKeyLatestVersionArgs $) { + this.cryptoKey = $.cryptoKey; + this.filter = $.filter; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCryptoKeyLatestVersionArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCryptoKeyLatestVersionArgs $; + + public Builder() { + $ = new GetCryptoKeyLatestVersionArgs(); + } + + public Builder(GetCryptoKeyLatestVersionArgs defaults) { + $ = new GetCryptoKeyLatestVersionArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param cryptoKey The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + * @return builder + * + */ + public Builder cryptoKey(Output cryptoKey) { + $.cryptoKey = cryptoKey; + return this; + } + + /** + * @param cryptoKey The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + * @return builder + * + */ + public Builder cryptoKey(String cryptoKey) { + return cryptoKey(Output.of(cryptoKey)); + } + + /** + * @param filter The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on state. + * + * * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + * @return builder + * + */ + public Builder filter(@Nullable Output filter) { + $.filter = filter; + return this; + } + + /** + * @param filter The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on state. + * + * * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + * @return builder + * + */ + public Builder filter(String filter) { + return filter(Output.of(filter)); + } + + public GetCryptoKeyLatestVersionArgs build() { + if ($.cryptoKey == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionArgs", "cryptoKey"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyLatestVersionPlainArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyLatestVersionPlainArgs.java new file mode 100644 index 0000000000..2a981f0e56 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyLatestVersionPlainArgs.java @@ -0,0 +1,124 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.kms.inputs; + +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCryptoKeyLatestVersionPlainArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetCryptoKeyLatestVersionPlainArgs Empty = new GetCryptoKeyLatestVersionPlainArgs(); + + /** + * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + */ + @Import(name="cryptoKey", required=true) + private String cryptoKey; + + /** + * @return The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + */ + public String cryptoKey() { + return this.cryptoKey; + } + + /** + * The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on state. + * + * * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + */ + @Import(name="filter") + private @Nullable String filter; + + /** + * @return The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on state. + * + * * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + */ + public Optional filter() { + return Optional.ofNullable(this.filter); + } + + private GetCryptoKeyLatestVersionPlainArgs() {} + + private GetCryptoKeyLatestVersionPlainArgs(GetCryptoKeyLatestVersionPlainArgs $) { + this.cryptoKey = $.cryptoKey; + this.filter = $.filter; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCryptoKeyLatestVersionPlainArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCryptoKeyLatestVersionPlainArgs $; + + public Builder() { + $ = new GetCryptoKeyLatestVersionPlainArgs(); + } + + public Builder(GetCryptoKeyLatestVersionPlainArgs defaults) { + $ = new GetCryptoKeyLatestVersionPlainArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param cryptoKey The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + * @return builder + * + */ + public Builder cryptoKey(String cryptoKey) { + $.cryptoKey = cryptoKey; + return this; + } + + /** + * @param filter The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on state. + * + * * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". 
+ * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + * @return builder + * + */ + public Builder filter(@Nullable String filter) { + $.filter = filter; + return this; + } + + public GetCryptoKeyLatestVersionPlainArgs build() { + if ($.cryptoKey == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionPlainArgs", "cryptoKey"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyVersionsArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyVersionsArgs.java new file mode 100644 index 0000000000..832c8ad7d6 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyVersionsArgs.java @@ -0,0 +1,156 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.kms.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCryptoKeyVersionsArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetCryptoKeyVersionsArgs Empty = new GetCryptoKeyVersionsArgs(); + + /** + * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + */ + @Import(name="cryptoKey", required=true) + private Output cryptoKey; + + /** + * @return The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + */ + public Output cryptoKey() { + return this.cryptoKey; + } + + /** + * The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + * + * * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + */ + @Import(name="filter") + private @Nullable Output filter; + + /** + * @return The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + * + * * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. 
+ * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + */ + public Optional> filter() { + return Optional.ofNullable(this.filter); + } + + private GetCryptoKeyVersionsArgs() {} + + private GetCryptoKeyVersionsArgs(GetCryptoKeyVersionsArgs $) { + this.cryptoKey = $.cryptoKey; + this.filter = $.filter; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCryptoKeyVersionsArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCryptoKeyVersionsArgs $; + + public Builder() { + $ = new GetCryptoKeyVersionsArgs(); + } + + public Builder(GetCryptoKeyVersionsArgs defaults) { + $ = new GetCryptoKeyVersionsArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param cryptoKey The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + * @return builder + * + */ + public Builder cryptoKey(Output cryptoKey) { + $.cryptoKey = cryptoKey; + return this; + } + + /** + * @param cryptoKey The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + * @return builder + * + */ + public Builder cryptoKey(String cryptoKey) { + return cryptoKey(Output.of(cryptoKey)); + } + + /** + * @param filter The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + * + * * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + * @return builder + * + */ + public Builder filter(@Nullable Output filter) { + $.filter = filter; + return this; + } + + /** + * @param filter The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + * + * * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. 
+ * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + * @return builder + * + */ + public Builder filter(String filter) { + return filter(Output.of(filter)); + } + + public GetCryptoKeyVersionsArgs build() { + if ($.cryptoKey == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsArgs", "cryptoKey"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyVersionsPlainArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyVersionsPlainArgs.java new file mode 100644 index 0000000000..45cea86269 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/inputs/GetCryptoKeyVersionsPlainArgs.java @@ -0,0 +1,127 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.kms.inputs; + +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class GetCryptoKeyVersionsPlainArgs extends com.pulumi.resources.InvokeArgs { + + public static final GetCryptoKeyVersionsPlainArgs Empty = new GetCryptoKeyVersionsPlainArgs(); + + /** + * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + */ + @Import(name="cryptoKey", required=true) + private String cryptoKey; + + /** + * @return The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + */ + public String cryptoKey() { + return this.cryptoKey; + } + + /** + * The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + * + * * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + */ + @Import(name="filter") + private @Nullable String filter; + + /** + * @return The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + * + * * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. 
+ * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + */ + public Optional filter() { + return Optional.ofNullable(this.filter); + } + + private GetCryptoKeyVersionsPlainArgs() {} + + private GetCryptoKeyVersionsPlainArgs(GetCryptoKeyVersionsPlainArgs $) { + this.cryptoKey = $.cryptoKey; + this.filter = $.filter; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(GetCryptoKeyVersionsPlainArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private GetCryptoKeyVersionsPlainArgs $; + + public Builder() { + $ = new GetCryptoKeyVersionsPlainArgs(); + } + + public Builder(GetCryptoKeyVersionsPlainArgs defaults) { + $ = new GetCryptoKeyVersionsPlainArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param cryptoKey The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + * @return builder + * + */ + public Builder cryptoKey(String cryptoKey) { + $.cryptoKey = cryptoKey; + return this; + } + + /** + * @param filter The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + * + * * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + * + * @return builder + * + */ + public Builder filter(@Nullable String filter) { + $.filter = filter; + return this; + } + + public GetCryptoKeyVersionsPlainArgs build() { + if ($.cryptoKey == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsPlainArgs", "cryptoKey"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyLatestVersionPublicKey.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyLatestVersionPublicKey.java new file mode 100644 index 0000000000..034ce14306 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyLatestVersionPublicKey.java @@ -0,0 +1,81 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.kms.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetCryptoKeyLatestVersionPublicKey { + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + * + */ + private String algorithm; + /** + * @return The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + * + */ + private String pem; + + private GetCryptoKeyLatestVersionPublicKey() {} + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. 
+ * + */ + public String algorithm() { + return this.algorithm; + } + /** + * @return The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + * + */ + public String pem() { + return this.pem; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCryptoKeyLatestVersionPublicKey defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String algorithm; + private String pem; + public Builder() {} + public Builder(GetCryptoKeyLatestVersionPublicKey defaults) { + Objects.requireNonNull(defaults); + this.algorithm = defaults.algorithm; + this.pem = defaults.pem; + } + + @CustomType.Setter + public Builder algorithm(String algorithm) { + if (algorithm == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionPublicKey", "algorithm"); + } + this.algorithm = algorithm; + return this; + } + @CustomType.Setter + public Builder pem(String pem) { + if (pem == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionPublicKey", "pem"); + } + this.pem = pem; + return this; + } + public GetCryptoKeyLatestVersionPublicKey build() { + final var _resultValue = new GetCryptoKeyLatestVersionPublicKey(); + _resultValue.algorithm = algorithm; + _resultValue.pem = pem; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyLatestVersionResult.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyLatestVersionResult.java new file mode 100644 index 0000000000..650b9e52f8 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyLatestVersionResult.java @@ -0,0 +1,216 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.kms.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.kms.outputs.GetCryptoKeyLatestVersionPublicKey; +import java.lang.Integer; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class GetCryptoKeyLatestVersionResult { + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + * + */ + private String algorithm; + private String cryptoKey; + private @Nullable String filter; + /** + * @return The provider-assigned unique ID for this managed resource. + * + */ + private String id; + private String name; + /** + * @return The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protection_level reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs. + * + */ + private String protectionLevel; + /** + * @return If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below. + * + */ + private List publicKeys; + /** + * @return The current state of the latest CryptoKeyVersion. 
See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs. + * + */ + private String state; + private Integer version; + + private GetCryptoKeyLatestVersionResult() {} + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + * + */ + public String algorithm() { + return this.algorithm; + } + public String cryptoKey() { + return this.cryptoKey; + } + public Optional filter() { + return Optional.ofNullable(this.filter); + } + /** + * @return The provider-assigned unique ID for this managed resource. + * + */ + public String id() { + return this.id; + } + public String name() { + return this.name; + } + /** + * @return The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protection_level reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs. + * + */ + public String protectionLevel() { + return this.protectionLevel; + } + /** + * @return If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below. + * + */ + public List publicKeys() { + return this.publicKeys; + } + /** + * @return The current state of the latest CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs. + * + */ + public String state() { + return this.state; + } + public Integer version() { + return this.version; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCryptoKeyLatestVersionResult defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String algorithm; + private String cryptoKey; + private @Nullable String filter; + private String id; + private String name; + private String protectionLevel; + private List publicKeys; + private String state; + private Integer version; + public Builder() {} + public Builder(GetCryptoKeyLatestVersionResult defaults) { + Objects.requireNonNull(defaults); + this.algorithm = defaults.algorithm; + this.cryptoKey = defaults.cryptoKey; + this.filter = defaults.filter; + this.id = defaults.id; + this.name = defaults.name; + this.protectionLevel = defaults.protectionLevel; + this.publicKeys = defaults.publicKeys; + this.state = defaults.state; + this.version = defaults.version; + } + + @CustomType.Setter + public Builder algorithm(String algorithm) { + if (algorithm == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionResult", "algorithm"); + } + this.algorithm = algorithm; + return this; + } + @CustomType.Setter + public Builder cryptoKey(String cryptoKey) { + if (cryptoKey == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionResult", "cryptoKey"); + } + this.cryptoKey = cryptoKey; + return this; + } + @CustomType.Setter + public Builder filter(@Nullable String filter) { + + this.filter = filter; + return this; + } + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionResult", "id"); + } + this.id = id; + return this; + } + @CustomType.Setter + public Builder name(String name) { 
+ if (name == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionResult", "name"); + } + this.name = name; + return this; + } + @CustomType.Setter + public Builder protectionLevel(String protectionLevel) { + if (protectionLevel == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionResult", "protectionLevel"); + } + this.protectionLevel = protectionLevel; + return this; + } + @CustomType.Setter + public Builder publicKeys(List publicKeys) { + if (publicKeys == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionResult", "publicKeys"); + } + this.publicKeys = publicKeys; + return this; + } + public Builder publicKeys(GetCryptoKeyLatestVersionPublicKey... publicKeys) { + return publicKeys(List.of(publicKeys)); + } + @CustomType.Setter + public Builder state(String state) { + if (state == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionResult", "state"); + } + this.state = state; + return this; + } + @CustomType.Setter + public Builder version(Integer version) { + if (version == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyLatestVersionResult", "version"); + } + this.version = version; + return this; + } + public GetCryptoKeyLatestVersionResult build() { + final var _resultValue = new GetCryptoKeyLatestVersionResult(); + _resultValue.algorithm = algorithm; + _resultValue.cryptoKey = cryptoKey; + _resultValue.filter = filter; + _resultValue.id = id; + _resultValue.name = name; + _resultValue.protectionLevel = protectionLevel; + _resultValue.publicKeys = publicKeys; + _resultValue.state = state; + _resultValue.version = version; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsPublicKey.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsPublicKey.java new file mode 100644 index 0000000000..a99ef5cb8a --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsPublicKey.java @@ -0,0 +1,81 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.kms.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetCryptoKeyVersionsPublicKey { + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + * + */ + private String algorithm; + /** + * @return The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + * + */ + private String pem; + + private GetCryptoKeyVersionsPublicKey() {} + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + * + */ + public String algorithm() { + return this.algorithm; + } + /** + * @return The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. 
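`GetCryptoKeyLatestVersionResult` above exposes the latest version's `state`, `protectionLevel` and, for asymmetric keys, its `publicKeys`. A minimal sketch of reading those fields, assuming the generated `KmsFunctions.getCryptoKeyLatestVersion` invoke and `GetCryptoKeyLatestVersionArgs` input type (not shown in this hunk); key names are placeholders:

```java
package generated_program;

import com.pulumi.Pulumi;
import com.pulumi.gcp.kms.KmsFunctions;
import com.pulumi.gcp.kms.inputs.GetCryptoKeyLatestVersionArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var latest = KmsFunctions.getCryptoKeyLatestVersion(GetCryptoKeyLatestVersionArgs.builder()
                .cryptoKey("projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-asymmetric-key")
                .build());

            // state and protectionLevel are always present on the result.
            ctx.export("latestVersionState", latest.applyValue(r -> r.state()));

            // publicKeys is only populated for ASYMMETRIC_SIGN / ASYMMETRIC_DECRYPT keys,
            // so guard against an empty list before reading the PEM.
            ctx.export("latestVersionPem", latest.applyValue(
                r -> r.publicKeys().isEmpty() ? "" : r.publicKeys().get(0).pem()));
        });
    }
}
```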
+ * + */ + public String pem() { + return this.pem; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCryptoKeyVersionsPublicKey defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String algorithm; + private String pem; + public Builder() {} + public Builder(GetCryptoKeyVersionsPublicKey defaults) { + Objects.requireNonNull(defaults); + this.algorithm = defaults.algorithm; + this.pem = defaults.pem; + } + + @CustomType.Setter + public Builder algorithm(String algorithm) { + if (algorithm == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsPublicKey", "algorithm"); + } + this.algorithm = algorithm; + return this; + } + @CustomType.Setter + public Builder pem(String pem) { + if (pem == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsPublicKey", "pem"); + } + this.pem = pem; + return this; + } + public GetCryptoKeyVersionsPublicKey build() { + final var _resultValue = new GetCryptoKeyVersionsPublicKey(); + _resultValue.algorithm = algorithm; + _resultValue.pem = pem; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsResult.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsResult.java new file mode 100644 index 0000000000..bf1b95f9b9 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsResult.java @@ -0,0 +1,135 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.kms.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.kms.outputs.GetCryptoKeyVersionsPublicKey; +import com.pulumi.gcp.kms.outputs.GetCryptoKeyVersionsVersion; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class GetCryptoKeyVersionsResult { + private String cryptoKey; + private @Nullable String filter; + /** + * @return The provider-assigned unique ID for this managed resource. + * + */ + private String id; + private List publicKeys; + /** + * @return A list of all the retrieved crypto key versions from the provided crypto key. This list is influenced by the provided filter argument. + * + */ + private List versions; + + private GetCryptoKeyVersionsResult() {} + public String cryptoKey() { + return this.cryptoKey; + } + public Optional filter() { + return Optional.ofNullable(this.filter); + } + /** + * @return The provider-assigned unique ID for this managed resource. + * + */ + public String id() { + return this.id; + } + public List publicKeys() { + return this.publicKeys; + } + /** + * @return A list of all the retrieved crypto key versions from the provided crypto key. This list is influenced by the provided filter argument. 
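Since `versions()` on `GetCryptoKeyVersionsResult` returns the filtered list as nested `GetCryptoKeyVersionsVersion` objects, a short follow-on sketch (same assumptions and placeholder names as the earlier KMS example) shows mapping that list into a stack output:

```java
package generated_program;

import com.pulumi.Pulumi;
import com.pulumi.gcp.kms.KmsFunctions;
import com.pulumi.gcp.kms.inputs.GetCryptoKeyVersionsArgs;
import java.util.stream.Collectors;

public class App {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var result = KmsFunctions.getCryptoKeyVersions(GetCryptoKeyVersionsArgs.builder()
                .cryptoKey("projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1")
                .build());

            // Each element of versions() is a GetCryptoKeyVersionsVersion carrying its own
            // name, state, protectionLevel and (for asymmetric keys) publicKeys.
            ctx.export("versionStates", result.applyValue(r -> r.versions().stream()
                .map(v -> v.name() + "=" + v.state())
                .collect(Collectors.toList())));
        });
    }
}
```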
+ * + */ + public List versions() { + return this.versions; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCryptoKeyVersionsResult defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String cryptoKey; + private @Nullable String filter; + private String id; + private List publicKeys; + private List versions; + public Builder() {} + public Builder(GetCryptoKeyVersionsResult defaults) { + Objects.requireNonNull(defaults); + this.cryptoKey = defaults.cryptoKey; + this.filter = defaults.filter; + this.id = defaults.id; + this.publicKeys = defaults.publicKeys; + this.versions = defaults.versions; + } + + @CustomType.Setter + public Builder cryptoKey(String cryptoKey) { + if (cryptoKey == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsResult", "cryptoKey"); + } + this.cryptoKey = cryptoKey; + return this; + } + @CustomType.Setter + public Builder filter(@Nullable String filter) { + + this.filter = filter; + return this; + } + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsResult", "id"); + } + this.id = id; + return this; + } + @CustomType.Setter + public Builder publicKeys(List publicKeys) { + if (publicKeys == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsResult", "publicKeys"); + } + this.publicKeys = publicKeys; + return this; + } + public Builder publicKeys(GetCryptoKeyVersionsPublicKey... publicKeys) { + return publicKeys(List.of(publicKeys)); + } + @CustomType.Setter + public Builder versions(List versions) { + if (versions == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsResult", "versions"); + } + this.versions = versions; + return this; + } + public Builder versions(GetCryptoKeyVersionsVersion... versions) { + return versions(List.of(versions)); + } + public GetCryptoKeyVersionsResult build() { + final var _resultValue = new GetCryptoKeyVersionsResult(); + _resultValue.cryptoKey = cryptoKey; + _resultValue.filter = filter; + _resultValue.id = id; + _resultValue.publicKeys = publicKeys; + _resultValue.versions = versions; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsVersion.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsVersion.java new file mode 100644 index 0000000000..d659bccd6f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsVersion.java @@ -0,0 +1,179 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.kms.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.kms.outputs.GetCryptoKeyVersionsVersionPublicKey; +import java.lang.Integer; +import java.lang.String; +import java.util.List; +import java.util.Objects; + +@CustomType +public final class GetCryptoKeyVersionsVersion { + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + * + */ + private String algorithm; + /** + * @return The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. 
+ * + */ + private String cryptoKey; + private String id; + private String name; + private String protectionLevel; + private List publicKeys; + private String state; + private Integer version; + + private GetCryptoKeyVersionsVersion() {} + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + * + */ + public String algorithm() { + return this.algorithm; + } + /** + * @return The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + * + */ + public String cryptoKey() { + return this.cryptoKey; + } + public String id() { + return this.id; + } + public String name() { + return this.name; + } + public String protectionLevel() { + return this.protectionLevel; + } + public List publicKeys() { + return this.publicKeys; + } + public String state() { + return this.state; + } + public Integer version() { + return this.version; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCryptoKeyVersionsVersion defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String algorithm; + private String cryptoKey; + private String id; + private String name; + private String protectionLevel; + private List publicKeys; + private String state; + private Integer version; + public Builder() {} + public Builder(GetCryptoKeyVersionsVersion defaults) { + Objects.requireNonNull(defaults); + this.algorithm = defaults.algorithm; + this.cryptoKey = defaults.cryptoKey; + this.id = defaults.id; + this.name = defaults.name; + this.protectionLevel = defaults.protectionLevel; + this.publicKeys = defaults.publicKeys; + this.state = defaults.state; + this.version = defaults.version; + } + + @CustomType.Setter + public Builder algorithm(String algorithm) { + if (algorithm == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersion", "algorithm"); + } + this.algorithm = algorithm; + return this; + } + @CustomType.Setter + public Builder cryptoKey(String cryptoKey) { + if (cryptoKey == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersion", "cryptoKey"); + } + this.cryptoKey = cryptoKey; + return this; + } + @CustomType.Setter + public Builder id(String id) { + if (id == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersion", "id"); + } + this.id = id; + return this; + } + @CustomType.Setter + public Builder name(String name) { + if (name == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersion", "name"); + } + this.name = name; + return this; + } + @CustomType.Setter + public Builder protectionLevel(String protectionLevel) { + if (protectionLevel == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersion", "protectionLevel"); + } + this.protectionLevel = protectionLevel; + return this; + } + @CustomType.Setter + public Builder publicKeys(List publicKeys) { + if (publicKeys == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersion", "publicKeys"); + } + this.publicKeys = publicKeys; + return this; + } + public Builder publicKeys(GetCryptoKeyVersionsVersionPublicKey... 
publicKeys) { + return publicKeys(List.of(publicKeys)); + } + @CustomType.Setter + public Builder state(String state) { + if (state == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersion", "state"); + } + this.state = state; + return this; + } + @CustomType.Setter + public Builder version(Integer version) { + if (version == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersion", "version"); + } + this.version = version; + return this; + } + public GetCryptoKeyVersionsVersion build() { + final var _resultValue = new GetCryptoKeyVersionsVersion(); + _resultValue.algorithm = algorithm; + _resultValue.cryptoKey = cryptoKey; + _resultValue.id = id; + _resultValue.name = name; + _resultValue.protectionLevel = protectionLevel; + _resultValue.publicKeys = publicKeys; + _resultValue.state = state; + _resultValue.version = version; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsVersionPublicKey.java b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsVersionPublicKey.java new file mode 100644 index 0000000000..8f8477d5a0 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/kms/outputs/GetCryptoKeyVersionsVersionPublicKey.java @@ -0,0 +1,81 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.kms.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; + +@CustomType +public final class GetCryptoKeyVersionsVersionPublicKey { + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + * + */ + private String algorithm; + /** + * @return The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + * + */ + private String pem; + + private GetCryptoKeyVersionsVersionPublicKey() {} + /** + * @return The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + * + */ + public String algorithm() { + return this.algorithm; + } + /** + * @return The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. 
+ * + */ + public String pem() { + return this.pem; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(GetCryptoKeyVersionsVersionPublicKey defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String algorithm; + private String pem; + public Builder() {} + public Builder(GetCryptoKeyVersionsVersionPublicKey defaults) { + Objects.requireNonNull(defaults); + this.algorithm = defaults.algorithm; + this.pem = defaults.pem; + } + + @CustomType.Setter + public Builder algorithm(String algorithm) { + if (algorithm == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersionPublicKey", "algorithm"); + } + this.algorithm = algorithm; + return this; + } + @CustomType.Setter + public Builder pem(String pem) { + if (pem == null) { + throw new MissingRequiredPropertyException("GetCryptoKeyVersionsVersionPublicKey", "pem"); + } + this.pem = pem; + return this; + } + public GetCryptoKeyVersionsVersionPublicKey build() { + final var _resultValue = new GetCryptoKeyVersionsVersionPublicKey(); + _resultValue.algorithm = algorithm; + _resultValue.pem = pem; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/netapp/ActiveDirectory.java b/sdk/java/src/main/java/com/pulumi/gcp/netapp/ActiveDirectory.java index 8ac48621d5..0d8bf082c6 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/netapp/ActiveDirectory.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/netapp/ActiveDirectory.java @@ -20,7 +20,7 @@ /** * ActiveDirectory is the public representation of the active directory config. * - * To get more information about activeDirectory, see: + * To get more information about ActiveDirectory, see: * * * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.activeDirectories) * * How-to Guides @@ -90,7 +90,7 @@ * * ## Import * - * activeDirectory can be imported using any of these accepted formats: + * ActiveDirectory can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}` * @@ -98,7 +98,7 @@ * * * `{{location}}/{{name}}` * - * When using the `pulumi import` command, activeDirectory can be imported using one of the formats above. For example: + * When using the `pulumi import` command, ActiveDirectory can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default projects/{{project}}/locations/{{location}}/activeDirectories/{{name}} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/netapp/Backup.java b/sdk/java/src/main/java/com/pulumi/gcp/netapp/Backup.java index cba02a8570..2c1e915d0e 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/netapp/Backup.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/netapp/Backup.java @@ -32,7 +32,7 @@ * from a volume or from an existing volume snapshot. Scheduled backups * require a backup policy. 
* - * To get more information about backup, see: + * To get more information about Backup, see: * * * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults.backups) * * How-to Guides @@ -119,7 +119,7 @@ * * ## Import * - * backup can be imported using any of these accepted formats: + * Backup can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}` * @@ -127,7 +127,7 @@ * * * `{{location}}/{{vault_name}}/{{name}}` * - * When using the `pulumi import` command, backup can be imported using one of the formats above. For example: + * When using the `pulumi import` command, Backup can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:netapp/backup:Backup default projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/netapp/BackupPolicy.java b/sdk/java/src/main/java/com/pulumi/gcp/netapp/BackupPolicy.java index ce7600e3b9..fa8f5e220c 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/netapp/BackupPolicy.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/netapp/BackupPolicy.java @@ -23,7 +23,7 @@ * Backup policies allow you to attach a backup schedule to a volume. * The policy defines how many backups to retain at daily, weekly, or monthly intervals. * - * To get more information about backupPolicy, see: + * To get more information about BackupPolicy, see: * * * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupPolicies) * * How-to Guides @@ -75,7 +75,7 @@ * * ## Import * - * backupPolicy can be imported using any of these accepted formats: + * BackupPolicy can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}` * @@ -83,7 +83,7 @@ * * * `{{location}}/{{name}}` * - * When using the `pulumi import` command, backupPolicy can be imported using one of the formats above. For example: + * When using the `pulumi import` command, BackupPolicy can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:netapp/backupPolicy:BackupPolicy default projects/{{project}}/locations/{{location}}/backupPolicies/{{name}} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/netapp/BackupVault.java b/sdk/java/src/main/java/com/pulumi/gcp/netapp/BackupVault.java index f7914a4c3b..7de10c55c6 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/netapp/BackupVault.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/netapp/BackupVault.java @@ -20,7 +20,7 @@ * A backup vault is the location where backups are stored. You can only create one backup vault per region. * A vault can hold multiple backups for multiple volumes in that region. * - * To get more information about backupVault, see: + * To get more information about BackupVault, see: * * * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults) * * How-to Guides @@ -68,7 +68,7 @@ * * ## Import * - * backupVault can be imported using any of these accepted formats: + * BackupVault can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/backupVaults/{{name}}` * @@ -76,7 +76,7 @@ * * * `{{location}}/{{name}}` * - * When using the `pulumi import` command, backupVault can be imported using one of the formats above. 
For example: + * When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:netapp/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{name}} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/netapp/StoragePool.java b/sdk/java/src/main/java/com/pulumi/gcp/netapp/StoragePool.java index 08be0643d5..57fae43d5d 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/netapp/StoragePool.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/netapp/StoragePool.java @@ -104,7 +104,7 @@ * * ## Import * - * storagePool can be imported using any of these accepted formats: + * StoragePool can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/storagePools/{{name}}` * @@ -112,7 +112,7 @@ * * * `{{location}}/{{name}}` * - * When using the `pulumi import` command, storagePool can be imported using one of the formats above. For example: + * When using the `pulumi import` command, StoragePool can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:netapp/storagePool:StoragePool default projects/{{project}}/locations/{{location}}/storagePools/{{name}} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/netapp/Volume.java b/sdk/java/src/main/java/com/pulumi/gcp/netapp/Volume.java index 0709c0bf79..04d1fd0265 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/netapp/Volume.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/netapp/Volume.java @@ -184,6 +184,7 @@ public Output createTime() { * Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * */ @Export(name="deletionPolicy", refs={String.class}, tree="[0]") @@ -193,6 +194,7 @@ public Output createTime() { * @return Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * */ public Output> deletionPolicy() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/netapp/VolumeArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/netapp/VolumeArgs.java index ca4a2b6d5e..ad37702e96 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/netapp/VolumeArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/netapp/VolumeArgs.java @@ -59,6 +59,7 @@ public Output capacityGib() { * Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * */ @Import(name="deletionPolicy") @@ -68,6 +69,7 @@ public Output capacityGib() { * @return Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * */ public Optional> deletionPolicy() { @@ -450,6 +452,7 @@ public Builder capacityGib(String capacityGib) { * @param deletionPolicy Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. 
* Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * * @return builder * @@ -463,6 +466,7 @@ public Builder deletionPolicy(@Nullable Output deletionPolicy) { * @param deletionPolicy Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/netapp/inputs/VolumeState.java b/sdk/java/src/main/java/com/pulumi/gcp/netapp/inputs/VolumeState.java index 402fccca07..51db1641e7 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/netapp/inputs/VolumeState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/netapp/inputs/VolumeState.java @@ -89,6 +89,7 @@ public Optional> createTime() { * Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * */ @Import(name="deletionPolicy") @@ -98,6 +99,7 @@ public Optional> createTime() { * @return Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * */ public Optional> deletionPolicy() { @@ -768,6 +770,7 @@ public Builder createTime(String createTime) { * @param deletionPolicy Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * * @return builder * @@ -781,6 +784,7 @@ public Builder deletionPolicy(@Nullable Output deletionPolicy) { * @param deletionPolicy Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/Spoke.java b/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/Spoke.java index 8787dab1b9..e47fe22725 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/Spoke.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/Spoke.java @@ -82,6 +82,9 @@ * .excludeExportRanges( * "198.51.100.0/24", * "10.10.0.0/16") + * .includeExportRanges( + * "198.51.100.0/23", + * "10.0.0.0/8") * .uri(network.selfLink()) * .build()) * .build()); diff --git a/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/inputs/SpokeLinkedVpcNetworkArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/inputs/SpokeLinkedVpcNetworkArgs.java index 977a2ba11b..46de0cf1dd 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/inputs/SpokeLinkedVpcNetworkArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/inputs/SpokeLinkedVpcNetworkArgs.java @@ -32,6 +32,21 @@ public Optional>> excludeExportRanges() { return Optional.ofNullable(this.excludeExportRanges); } + /** + * IP ranges allowed to be included from peering. 
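The `deletionPolicy` documentation added above (`Possible values: DEFAULT, FORCE`) is easiest to read next to a concrete resource. A hypothetical `gcp.netapp.Volume` sketch follows; apart from `deletionPolicy`, the field values and the referenced storage pool are placeholders:

```java
package generated_program;

import com.pulumi.Pulumi;
import com.pulumi.gcp.netapp.Volume;
import com.pulumi.gcp.netapp.VolumeArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var volume = new Volume("volume", VolumeArgs.builder()
                .name("my-volume")
                .location("us-central1")
                .storagePool("my-storage-pool")   // name of an existing gcp.netapp.StoragePool
                .shareName("my-share")
                .capacityGib("1024")
                .protocols("NFSV3")
                // FORCE deletes the volume even when nested snapshot resources still exist;
                // DEFAULT (the default behaviour) fails the delete instead.
                .deletionPolicy("FORCE")
                .build());
        });
    }
}
```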
+ * + */ + @Import(name="includeExportRanges") + private @Nullable Output> includeExportRanges; + + /** + * @return IP ranges allowed to be included from peering. + * + */ + public Optional>> includeExportRanges() { + return Optional.ofNullable(this.includeExportRanges); + } + /** * The URI of the VPC network resource. * @@ -51,6 +66,7 @@ private SpokeLinkedVpcNetworkArgs() {} private SpokeLinkedVpcNetworkArgs(SpokeLinkedVpcNetworkArgs $) { this.excludeExportRanges = $.excludeExportRanges; + this.includeExportRanges = $.includeExportRanges; this.uri = $.uri; } @@ -103,6 +119,37 @@ public Builder excludeExportRanges(String... excludeExportRanges) { return excludeExportRanges(List.of(excludeExportRanges)); } + /** + * @param includeExportRanges IP ranges allowed to be included from peering. + * + * @return builder + * + */ + public Builder includeExportRanges(@Nullable Output> includeExportRanges) { + $.includeExportRanges = includeExportRanges; + return this; + } + + /** + * @param includeExportRanges IP ranges allowed to be included from peering. + * + * @return builder + * + */ + public Builder includeExportRanges(List includeExportRanges) { + return includeExportRanges(Output.of(includeExportRanges)); + } + + /** + * @param includeExportRanges IP ranges allowed to be included from peering. + * + * @return builder + * + */ + public Builder includeExportRanges(String... includeExportRanges) { + return includeExportRanges(List.of(includeExportRanges)); + } + /** * @param uri The URI of the VPC network resource. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/outputs/SpokeLinkedVpcNetwork.java b/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/outputs/SpokeLinkedVpcNetwork.java index fe48ab6110..1dc12bf357 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/outputs/SpokeLinkedVpcNetwork.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/networkconnectivity/outputs/SpokeLinkedVpcNetwork.java @@ -17,6 +17,11 @@ public final class SpokeLinkedVpcNetwork { * */ private @Nullable List excludeExportRanges; + /** + * @return IP ranges allowed to be included from peering. + * + */ + private @Nullable List includeExportRanges; /** * @return The URI of the VPC network resource. * @@ -31,6 +36,13 @@ private SpokeLinkedVpcNetwork() {} public List excludeExportRanges() { return this.excludeExportRanges == null ? List.of() : this.excludeExportRanges; } + /** + * @return IP ranges allowed to be included from peering. + * + */ + public List includeExportRanges() { + return this.includeExportRanges == null ? List.of() : this.includeExportRanges; + } /** * @return The URI of the VPC network resource. * @@ -49,11 +61,13 @@ public static Builder builder(SpokeLinkedVpcNetwork defaults) { @CustomType.Builder public static final class Builder { private @Nullable List excludeExportRanges; + private @Nullable List includeExportRanges; private String uri; public Builder() {} public Builder(SpokeLinkedVpcNetwork defaults) { Objects.requireNonNull(defaults); this.excludeExportRanges = defaults.excludeExportRanges; + this.includeExportRanges = defaults.includeExportRanges; this.uri = defaults.uri; } @@ -67,6 +81,15 @@ public Builder excludeExportRanges(String... excludeExportRanges) { return excludeExportRanges(List.of(excludeExportRanges)); } @CustomType.Setter + public Builder includeExportRanges(@Nullable List includeExportRanges) { + + this.includeExportRanges = includeExportRanges; + return this; + } + public Builder includeExportRanges(String... 
includeExportRanges) { + return includeExportRanges(List.of(includeExportRanges)); + } + @CustomType.Setter public Builder uri(String uri) { if (uri == null) { throw new MissingRequiredPropertyException("SpokeLinkedVpcNetwork", "uri"); @@ -77,6 +100,7 @@ public Builder uri(String uri) { public SpokeLinkedVpcNetwork build() { final var _resultValue = new SpokeLinkedVpcNetwork(); _resultValue.excludeExportRanges = excludeExportRanges; + _resultValue.includeExportRanges = includeExportRanges; _resultValue.uri = uri; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/networksecurity/ClientTlsPolicy.java b/sdk/java/src/main/java/com/pulumi/gcp/networksecurity/ClientTlsPolicy.java index c8297b99da..5fad76fc63 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/networksecurity/ClientTlsPolicy.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/networksecurity/ClientTlsPolicy.java @@ -19,6 +19,14 @@ import javax.annotation.Nullable; /** + * ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. + * + * To get more information about ClientTlsPolicy, see: + * + * * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.clientTlsPolicies) + * * How-to Guides + * * [Service Security](https://cloud.google.com/traffic-director/docs/security-use-cases) + * * ## Example Usage * * ### Network Security Client Tls Policy Basic @@ -96,17 +104,11 @@ * .pluginInstance("google_cloud_private_spiffe") * .build()) * .build()) - * .serverValidationCas( - * ClientTlsPolicyServerValidationCaArgs.builder() - * .grpcEndpoint(ClientTlsPolicyServerValidationCaGrpcEndpointArgs.builder() - * .targetUri("unix:mypath") - * .build()) - * .build(), - * ClientTlsPolicyServerValidationCaArgs.builder() - * .grpcEndpoint(ClientTlsPolicyServerValidationCaGrpcEndpointArgs.builder() - * .targetUri("unix:mypath1") - * .build()) + * .serverValidationCas(ClientTlsPolicyServerValidationCaArgs.builder() + * .grpcEndpoint(ClientTlsPolicyServerValidationCaGrpcEndpointArgs.builder() + * .targetUri("unix:mypath") * .build()) + * .build()) * .build()); * * } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/networksecurity/ServerTlsPolicy.java b/sdk/java/src/main/java/com/pulumi/gcp/networksecurity/ServerTlsPolicy.java index c5f8a8cc22..26ccb50186 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/networksecurity/ServerTlsPolicy.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/networksecurity/ServerTlsPolicy.java @@ -20,6 +20,12 @@ import javax.annotation.Nullable; /** + * ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. 
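The new `includeExportRanges` field on `SpokeLinkedVpcNetwork(Args)` above can be combined with the existing `excludeExportRanges`. A hedged sketch, reusing the CIDR values from the `Spoke` Javadoc example earlier in this patch, with placeholder hub and network ids:

```java
package generated_program;

import com.pulumi.Pulumi;
import com.pulumi.gcp.networkconnectivity.Spoke;
import com.pulumi.gcp.networkconnectivity.SpokeArgs;
import com.pulumi.gcp.networkconnectivity.inputs.SpokeLinkedVpcNetworkArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            var spoke = new Spoke("spoke", SpokeArgs.builder()
                .name("my-vpc-spoke")
                .location("global")
                // Placeholder hub and network ids; substitute outputs from your own resources.
                .hub("projects/my-project/locations/global/hubs/my-hub")
                .linkedVpcNetwork(SpokeLinkedVpcNetworkArgs.builder()
                    .uri("projects/my-project/global/networks/my-network")
                    // Only these ranges are exported to the hub...
                    .includeExportRanges("198.51.100.0/23", "10.0.0.0/8")
                    // ...minus these subsets carved out of the included ranges.
                    .excludeExportRanges("198.51.100.0/24", "10.10.0.0/16")
                    .build())
                .build());
        });
    }
}
```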
+ * + * To get more information about ServerTlsPolicy, see: + * + * * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.serverTlsPolicies) + * * ## Example Usage * * ### Network Security Server Tls Policy Basic @@ -61,22 +67,11 @@ * .build()) * .build()) * .mtlsPolicy(ServerTlsPolicyMtlsPolicyArgs.builder() - * .clientValidationCas( - * ServerTlsPolicyMtlsPolicyClientValidationCaArgs.builder() - * .grpcEndpoint(ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs.builder() - * .targetUri("unix:mypath") - * .build()) - * .build(), - * ServerTlsPolicyMtlsPolicyClientValidationCaArgs.builder() - * .grpcEndpoint(ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs.builder() - * .targetUri("unix:abc/mypath") - * .build()) - * .build(), - * ServerTlsPolicyMtlsPolicyClientValidationCaArgs.builder() - * .certificateProviderInstance(ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs.builder() - * .pluginInstance("google_cloud_private_spiffe") - * .build()) + * .clientValidationCas(ServerTlsPolicyMtlsPolicyClientValidationCaArgs.builder() + * .grpcEndpoint(ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs.builder() + * .targetUri("unix:mypath") * .build()) + * .build()) * .build()) * .build()); * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/organizations/Project.java b/sdk/java/src/main/java/com/pulumi/gcp/organizations/Project.java index 355e1844dd..b5964f9490 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/organizations/Project.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/organizations/Project.java @@ -30,6 +30,10 @@ * * > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. * + * > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. + * + * > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. + * * To get more information about projects, see: * * * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -117,6 +121,44 @@ * * <!--End PulumiCodeChooser --> * + * To create a project with a tag + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.organizations.Project;
+ * import com.pulumi.gcp.organizations.ProjectArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var myProject = new Project("myProject", ProjectArgs.builder()
+ *             .name("My Project")
+ *             .projectId("your-project-id")
+ *             .orgId("1234567")
+ *             .tags(Map.of("1234567/env", "staging"))
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * * ## Import * * Projects can be imported using the `project_id`, e.g. @@ -312,6 +354,20 @@ public Output projectId() { public Output> pulumiLabels() { return this.pulumiLabels; } + /** + * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + * + */ + @Export(name="tags", refs={Map.class,String.class}, tree="[0,1,1]") + private Output> tags; + + /** + * @return A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + * + */ + public Output>> tags() { + return Codegen.optional(this.tags); + } /** * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/organizations/ProjectArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/organizations/ProjectArgs.java index 995f3f6478..d5b41cc452 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/organizations/ProjectArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/organizations/ProjectArgs.java @@ -163,6 +163,21 @@ public Optional> projectId() { return Optional.ofNullable(this.projectId); } + /** + * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + * + */ + @Import(name="tags") + private @Nullable Output> tags; + + /** + * @return A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + * + */ + public Optional>> tags() { + return Optional.ofNullable(this.tags); + } + private ProjectArgs() {} private ProjectArgs(ProjectArgs $) { @@ -174,6 +189,7 @@ private ProjectArgs(ProjectArgs $) { this.name = $.name; this.orgId = $.orgId; this.projectId = $.projectId; + this.tags = $.tags; } public static Builder builder() { @@ -384,6 +400,27 @@ public Builder projectId(String projectId) { return projectId(Output.of(projectId)); } + /** + * @param tags A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + * + * @return builder + * + */ + public Builder tags(@Nullable Output> tags) { + $.tags = tags; + return this; + } + + /** + * @param tags A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. 
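The note added to the `gcp.organizations.Project` documentation above recommends enforcing `constraints/compute.skipDefaultNetworkCreation` rather than setting `auto_create_network` to false. A hedged sketch of doing that with the existing `gcp.projects.OrganizationPolicy` resource (class and builder names are assumed from that resource's generated SDK; the project id is a placeholder):

```java
package generated_program;

import com.pulumi.Pulumi;
import com.pulumi.gcp.projects.OrganizationPolicy;
import com.pulumi.gcp.projects.OrganizationPolicyArgs;
import com.pulumi.gcp.projects.inputs.OrganizationPolicyBooleanPolicyArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            // Enforce the boolean constraint on the project so the default network is
            // never created, instead of relying on autoCreateNetwork(false).
            var skipDefaultNetwork = new OrganizationPolicy("skipDefaultNetwork", OrganizationPolicyArgs.builder()
                .project("your-project-id")
                .constraint("constraints/compute.skipDefaultNetworkCreation")
                .booleanPolicy(OrganizationPolicyBooleanPolicyArgs.builder()
                    .enforced(true)
                    .build())
                .build());
        });
    }
}
```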
+ * + * @return builder + * + */ + public Builder tags(Map tags) { + return tags(Output.of(tags)); + } + public ProjectArgs build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/organizations/inputs/ProjectState.java b/sdk/java/src/main/java/com/pulumi/gcp/organizations/inputs/ProjectState.java index 8c448cd89f..ceb3afec80 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/organizations/inputs/ProjectState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/organizations/inputs/ProjectState.java @@ -208,6 +208,21 @@ public Optional>> pulumiLabels() { return Optional.ofNullable(this.pulumiLabels); } + /** + * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + * + */ + @Import(name="tags") + private @Nullable Output> tags; + + /** + * @return A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + * + */ + public Optional>> tags() { + return Optional.ofNullable(this.tags); + } + private ProjectState() {} private ProjectState(ProjectState $) { @@ -222,6 +237,7 @@ private ProjectState(ProjectState $) { this.orgId = $.orgId; this.projectId = $.projectId; this.pulumiLabels = $.pulumiLabels; + this.tags = $.tags; } public static Builder builder() { @@ -495,6 +511,27 @@ public Builder pulumiLabels(Map pulumiLabels) { return pulumiLabels(Output.of(pulumiLabels)); } + /** + * @param tags A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + * + * @return builder + * + */ + public Builder tags(@Nullable Output> tags) { + $.tags = tags; + return this; + } + + /** + * @param tags A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. 
+ * + * @return builder + * + */ + public Builder tags(Map tags) { + return tags(Output.of(tags)); + } + public ProjectState build() { return $; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/organizations/outputs/GetProjectResult.java b/sdk/java/src/main/java/com/pulumi/gcp/organizations/outputs/GetProjectResult.java index 3aa0354161..70b305e68b 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/organizations/outputs/GetProjectResult.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/organizations/outputs/GetProjectResult.java @@ -34,6 +34,7 @@ public final class GetProjectResult { private String orgId; private @Nullable String projectId; private Map pulumiLabels; + private Map tags; private GetProjectResult() {} public Boolean autoCreateNetwork() { @@ -80,6 +81,9 @@ public Optional projectId() { public Map pulumiLabels() { return this.pulumiLabels; } + public Map tags() { + return this.tags; + } public static Builder builder() { return new Builder(); @@ -102,6 +106,7 @@ public static final class Builder { private String orgId; private @Nullable String projectId; private Map pulumiLabels; + private Map tags; public Builder() {} public Builder(GetProjectResult defaults) { Objects.requireNonNull(defaults); @@ -117,6 +122,7 @@ public Builder(GetProjectResult defaults) { this.orgId = defaults.orgId; this.projectId = defaults.projectId; this.pulumiLabels = defaults.pulumiLabels; + this.tags = defaults.tags; } @CustomType.Setter @@ -213,6 +219,14 @@ public Builder pulumiLabels(Map pulumiLabels) { this.pulumiLabels = pulumiLabels; return this; } + @CustomType.Setter + public Builder tags(Map tags) { + if (tags == null) { + throw new MissingRequiredPropertyException("GetProjectResult", "tags"); + } + this.tags = tags; + return this; + } public GetProjectResult build() { final var _resultValue = new GetProjectResult(); _resultValue.autoCreateNetwork = autoCreateNetwork; @@ -227,6 +241,7 @@ public GetProjectResult build() { _resultValue.orgId = orgId; _resultValue.projectId = projectId; _resultValue.pulumiLabels = pulumiLabels; + _resultValue.tags = tags; return _resultValue; } } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/Instance.java b/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/Instance.java index 8139d3220d..0f373a3971 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/Instance.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/Instance.java @@ -164,14 +164,14 @@ public Output createTime() { return this.createTime; } /** - * The version of DAOS software running in the instance + * The version of DAOS software running in the instance. * */ @Export(name="daosVersion", refs={String.class}, tree="[0]") private Output daosVersion; /** - * @return The version of DAOS software running in the instance + * @return The version of DAOS software running in the instance. * */ public Output daosVersion() { @@ -234,9 +234,9 @@ public Output> effectiveLabels() { return this.effectiveLabels; } /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. This field is populated by the service and + * Immutable. Contains the id of the allocated IP address + * range associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. This field is populated by the service * and contains the value currently used by the service. 
* */ @@ -244,9 +244,9 @@ public Output> effectiveLabels() { private Output effectiveReservedIpRange; /** - * @return Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. This field is populated by the service and + * @return Immutable. Contains the id of the allocated IP address + * range associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. This field is populated by the service * and contains the value currently used by the service. * */ @@ -310,12 +310,12 @@ public Output instanceId() { return this.instanceId; } /** - * Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -326,22 +326,23 @@ public Output instanceId() { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. - * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * */ @Export(name="labels", refs={Map.class,String.class}, tree="[0,1,1]") private Output> labels; /** - * @return Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * @return Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. 
They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -352,10 +353,11 @@ public Output instanceId() { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. - * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * */ public Output>> labels() { @@ -392,18 +394,16 @@ public Output name() { return this.name; } /** - * Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. * */ @Export(name="network", refs={String.class}, tree="[0]") private Output network; /** - * @return Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * @return Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. * */ public Output> network() { @@ -442,20 +442,20 @@ public Output> pulumiLabels() { return this.pulumiLabels; } /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. * */ @Export(name="reservedIpRange", refs={String.class}, tree="[0]") private Output reservedIpRange; /** - * @return Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * @return Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. 
* */ public Output> reservedIpRange() { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/InstanceArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/InstanceArgs.java index 7dac148980..a78094015b 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/InstanceArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/InstanceArgs.java @@ -135,12 +135,12 @@ public Output instanceId() { } /** - * Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -151,22 +151,23 @@ public Output instanceId() { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. - * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * */ @Import(name="labels") private @Nullable Output> labels; /** - * @return Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * @return Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. 
* * Label values must be between 0 and 63 characters long and must conform @@ -177,10 +178,11 @@ public Output instanceId() { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. - * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * */ public Optional>> labels() { @@ -203,18 +205,16 @@ public Output location() { } /** - * Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. * */ @Import(name="network") private @Nullable Output network; /** - * @return Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * @return Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. * */ public Optional> network() { @@ -239,20 +239,20 @@ public Optional> project() { } /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. * */ @Import(name="reservedIpRange") private @Nullable Output reservedIpRange; /** - * @return Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * @return Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. * */ public Optional> reservedIpRange() { @@ -440,12 +440,12 @@ public Builder instanceId(String instanceId) { } /** - * @param labels Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). 
+ * @param labels Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -456,10 +456,11 @@ public Builder instanceId(String instanceId) { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. - * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * * @return builder * @@ -470,12 +471,12 @@ public Builder labels(@Nullable Output> labels) { } /** - * @param labels Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * @param labels Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -486,10 +487,11 @@ public Builder labels(@Nullable Output> labels) { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. - * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. 
" + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * * @return builder * @@ -520,9 +522,8 @@ public Builder location(String location) { } /** - * @param network Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * @param network Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. * * @return builder * @@ -533,9 +534,8 @@ public Builder network(@Nullable Output network) { } /** - * @param network Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * @param network Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. * * @return builder * @@ -568,10 +568,10 @@ public Builder project(String project) { } /** - * @param reservedIpRange Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * @param reservedIpRange Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. * * @return builder * @@ -582,10 +582,10 @@ public Builder reservedIpRange(@Nullable Output reservedIpRange) { } /** - * @param reservedIpRange Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * @param reservedIpRange Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/inputs/InstanceState.java b/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/inputs/InstanceState.java index aabb801a54..5e554efdeb 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/inputs/InstanceState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/parallelstore/inputs/InstanceState.java @@ -65,14 +65,14 @@ public Optional> createTime() { } /** - * The version of DAOS software running in the instance + * The version of DAOS software running in the instance. * */ @Import(name="daosVersion") private @Nullable Output daosVersion; /** - * @return The version of DAOS software running in the instance + * @return The version of DAOS software running in the instance. * */ public Optional> daosVersion() { @@ -139,9 +139,9 @@ public Optional>> effectiveLabels() { } /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. This field is populated by the service and + * Immutable. 
Contains the id of the allocated IP address + * range associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. This field is populated by the service * and contains the value currently used by the service. * */ @@ -149,9 +149,9 @@ public Optional>> effectiveLabels() { private @Nullable Output effectiveReservedIpRange; /** - * @return Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. This field is populated by the service and + * @return Immutable. Contains the id of the allocated IP address + * range associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. This field is populated by the service * and contains the value currently used by the service. * */ @@ -218,12 +218,12 @@ public Optional> instanceId() { } /** - * Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -234,22 +234,23 @@ public Optional> instanceId() { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. - * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * */ @Import(name="labels") private @Nullable Output> labels; /** - * @return Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). 
+ * @return Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -260,10 +261,11 @@ public Optional> instanceId() { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. - * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * */ public Optional>> labels() { @@ -303,18 +305,16 @@ public Optional> name() { } /** - * Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. * */ @Import(name="network") private @Nullable Output network; /** - * @return Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * @return Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. * */ public Optional> network() { @@ -356,20 +356,20 @@ public Optional>> pulumiLabels() { } /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. * */ @Import(name="reservedIpRange") private @Nullable Output reservedIpRange; /** - * @return Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * @return Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. 
* */ public Optional> reservedIpRange() { @@ -539,7 +539,7 @@ public Builder createTime(String createTime) { } /** - * @param daosVersion The version of DAOS software running in the instance + * @param daosVersion The version of DAOS software running in the instance. * * @return builder * @@ -550,7 +550,7 @@ public Builder daosVersion(@Nullable Output daosVersion) { } /** - * @param daosVersion The version of DAOS software running in the instance + * @param daosVersion The version of DAOS software running in the instance. * * @return builder * @@ -637,9 +637,9 @@ public Builder effectiveLabels(Map effectiveLabels) { } /** - * @param effectiveReservedIpRange Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. This field is populated by the service and + * @param effectiveReservedIpRange Immutable. Contains the id of the allocated IP address + * range associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. This field is populated by the service * and contains the value currently used by the service. * * @return builder @@ -651,9 +651,9 @@ public Builder effectiveReservedIpRange(@Nullable Output effectiveReserv } /** - * @param effectiveReservedIpRange Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. This field is populated by the service and + * @param effectiveReservedIpRange Immutable. Contains the id of the allocated IP address + * range associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. This field is populated by the service * and contains the value currently used by the service. * * @return builder @@ -734,12 +734,12 @@ public Builder instanceId(String instanceId) { } /** - * @param labels Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * @param labels Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -750,10 +750,11 @@ public Builder instanceId(String instanceId) { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. 
- * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * * @return builder * @@ -764,12 +765,12 @@ public Builder labels(@Nullable Output> labels) { } /** - * @param labels Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * @param labels Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -780,10 +781,11 @@ public Builder labels(@Nullable Output> labels) { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. - * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. - * Please refer to the field `effective_labels` for all of the labels present on the resource. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effective_labels` for all of the labels present on the resource. * * @return builder * @@ -837,9 +839,8 @@ public Builder name(String name) { } /** - * @param network Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * @param network Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. * * @return builder * @@ -850,9 +851,8 @@ public Builder network(@Nullable Output network) { } /** - * @param network Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * @param network Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. 
* * @return builder * @@ -908,10 +908,10 @@ public Builder pulumiLabels(Map pulumiLabels) { } /** - * @param reservedIpRange Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * @param reservedIpRange Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. * * @return builder * @@ -922,10 +922,10 @@ public Builder reservedIpRange(@Nullable Output reservedIpRange) { } /** - * @param reservedIpRange Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * @param reservedIpRange Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. * * @return builder * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/projects/IamMemberRemove.java b/sdk/java/src/main/java/com/pulumi/gcp/projects/IamMemberRemove.java index 9e56f58704..1dd4fc93bd 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/projects/IamMemberRemove.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/projects/IamMemberRemove.java @@ -34,6 +34,47 @@ * and * [API reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). * + * ## Example Usage + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.organizations.OrganizationsFunctions;
+ * import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
+ * import com.pulumi.gcp.projects.IamMemberRemove;
+ * import com.pulumi.gcp.projects.IamMemberRemoveArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App }{{@code
+ *     public static void main(String[] args) }{{@code
+ *         Pulumi.run(App::stack);
+ *     }}{@code
+ * 
+ *     public static void stack(Context ctx) }{{@code
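+ *         // Look up the project that the active provider configuration points at; its id and number are used below.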
+ *         final var targetProject = OrganizationsFunctions.getProject();
+ * 
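+ *         // Non-authoritatively ensure the project's default Compute Engine service account no longer holds roles/editor.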
+ *         var foo = new IamMemberRemove("foo", IamMemberRemoveArgs.builder()
+ *             .role("roles/editor")
+ *             .project(targetProject.applyValue(getProjectResult -> getProjectResult.projectId()))
+ *             .member(targetProject.applyValue(getProjectResult -> String.format("serviceAccount:%s-compute}{@literal @}{@code developer.gserviceaccount.com", getProjectResult.number())))
+ *             .build());
+ * 
+ *     }}{@code
+ * }}{@code
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * */ @ResourceType(type="gcp:projects/iamMemberRemove:IamMemberRemove") public class IamMemberRemove extends com.pulumi.resources.CustomResource { diff --git a/sdk/java/src/main/java/com/pulumi/gcp/projects/UsageExportBucket.java b/sdk/java/src/main/java/com/pulumi/gcp/projects/UsageExportBucket.java index 0c2e7a6678..7074b2aed9 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/projects/UsageExportBucket.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/projects/UsageExportBucket.java @@ -27,6 +27,10 @@ * * > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. * + * > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. + * + * > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. + * * To get more information about projects, see: * * * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -114,6 +118,44 @@ * * <!--End PulumiCodeChooser --> * + * To create a project with a tag + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.organizations.Project;
+ * import com.pulumi.gcp.organizations.ProjectArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
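+ *         // Create a project under org 1234567 and attach the 1234567/env=staging tag at creation time.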
+ *         var myProject = new Project("myProject", ProjectArgs.builder()
+ *             .name("My Project")
+ *             .projectId("your-project-id")
+ *             .orgId("1234567")
+ *             .tags(Map.of("1234567/env", "staging"))
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * * ## Import * * Projects can be imported using the `project_id`, e.g. diff --git a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/Subscription.java b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/Subscription.java index bbf6985e3b..d32ba52097 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/Subscription.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/Subscription.java @@ -555,6 +555,7 @@ * .filenameDatetimeFormat("YYYY-MM-DD/hh_mm_ssZ") * .maxBytes(1000) * .maxDuration("300s") + * .maxMessages(1000) * .build()) * .build(), CustomResourceOptions.builder() * .dependsOn( @@ -631,8 +632,10 @@ * .filenameDatetimeFormat("YYYY-MM-DD/hh_mm_ssZ") * .maxBytes(1000) * .maxDuration("300s") + * .maxMessages(1000) * .avroConfig(SubscriptionCloudStorageConfigAvroConfigArgs.builder() * .writeMetadata(true) + * .useTopicSchema(true) * .build()) * .build()) * .build(), CustomResourceOptions.builder() diff --git a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/inputs/SubscriptionCloudStorageConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/inputs/SubscriptionCloudStorageConfigArgs.java index 8b31900d77..7fb1773f86 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/inputs/SubscriptionCloudStorageConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/inputs/SubscriptionCloudStorageConfigArgs.java @@ -131,6 +131,21 @@ public Optional> maxDuration() { return Optional.ofNullable(this.maxDuration); } + /** + * The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + * + */ + @Import(name="maxMessages") + private @Nullable Output maxMessages; + + /** + * @return The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + * + */ + public Optional> maxMessages() { + return Optional.ofNullable(this.maxMessages); + } + /** * The service account to use to write to Cloud Storage. If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), @@ -177,6 +192,7 @@ private SubscriptionCloudStorageConfigArgs(SubscriptionCloudStorageConfigArgs $) this.filenameSuffix = $.filenameSuffix; this.maxBytes = $.maxBytes; this.maxDuration = $.maxDuration; + this.maxMessages = $.maxMessages; this.serviceAccountEmail = $.serviceAccountEmail; this.state = $.state; } @@ -354,6 +370,27 @@ public Builder maxDuration(String maxDuration) { return maxDuration(Output.of(maxDuration)); } + /** + * @param maxMessages The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + * + * @return builder + * + */ + public Builder maxMessages(@Nullable Output maxMessages) { + $.maxMessages = maxMessages; + return this; + } + + /** + * @param maxMessages The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + * + * @return builder + * + */ + public Builder maxMessages(Integer maxMessages) { + return maxMessages(Output.of(maxMessages)); + } + /** * @param serviceAccountEmail The service account to use to write to Cloud Storage. 
If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), diff --git a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/inputs/SubscriptionCloudStorageConfigAvroConfigArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/inputs/SubscriptionCloudStorageConfigAvroConfigArgs.java index beab279b7b..d25939a4fd 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/inputs/SubscriptionCloudStorageConfigAvroConfigArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/inputs/SubscriptionCloudStorageConfigAvroConfigArgs.java @@ -15,6 +15,21 @@ public final class SubscriptionCloudStorageConfigAvroConfigArgs extends com.pulu public static final SubscriptionCloudStorageConfigAvroConfigArgs Empty = new SubscriptionCloudStorageConfigAvroConfigArgs(); + /** + * When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + * + */ + @Import(name="useTopicSchema") + private @Nullable Output useTopicSchema; + + /** + * @return When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + * + */ + public Optional> useTopicSchema() { + return Optional.ofNullable(this.useTopicSchema); + } + /** * When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. * @@ -33,6 +48,7 @@ public Optional> writeMetadata() { private SubscriptionCloudStorageConfigAvroConfigArgs() {} private SubscriptionCloudStorageConfigAvroConfigArgs(SubscriptionCloudStorageConfigAvroConfigArgs $) { + this.useTopicSchema = $.useTopicSchema; this.writeMetadata = $.writeMetadata; } @@ -54,6 +70,27 @@ public Builder(SubscriptionCloudStorageConfigAvroConfigArgs defaults) { $ = new SubscriptionCloudStorageConfigAvroConfigArgs(Objects.requireNonNull(defaults)); } + /** + * @param useTopicSchema When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + * + * @return builder + * + */ + public Builder useTopicSchema(@Nullable Output useTopicSchema) { + $.useTopicSchema = useTopicSchema; + return this; + } + + /** + * @param useTopicSchema When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + * + * @return builder + * + */ + public Builder useTopicSchema(Boolean useTopicSchema) { + return useTopicSchema(Output.of(useTopicSchema)); + } + /** * @param writeMetadata When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. * diff --git a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/GetSubscriptionCloudStorageConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/GetSubscriptionCloudStorageConfig.java index 93dc1f602d..7feba8e45e 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/GetSubscriptionCloudStorageConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/GetSubscriptionCloudStorageConfig.java @@ -51,6 +51,11 @@ public final class GetSubscriptionCloudStorageConfig { * */ private String maxDuration; + /** + * @return The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + * + */ + private Integer maxMessages; /** * @return The service account to use to write to Cloud Storage. 
If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), @@ -117,6 +122,13 @@ public Integer maxBytes() { public String maxDuration() { return this.maxDuration; } + /** + * @return The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + * + */ + public Integer maxMessages() { + return this.maxMessages; + } /** * @return The service account to use to write to Cloud Storage. If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), @@ -150,6 +162,7 @@ public static final class Builder { private String filenameSuffix; private Integer maxBytes; private String maxDuration; + private Integer maxMessages; private String serviceAccountEmail; private String state; public Builder() {} @@ -162,6 +175,7 @@ public Builder(GetSubscriptionCloudStorageConfig defaults) { this.filenameSuffix = defaults.filenameSuffix; this.maxBytes = defaults.maxBytes; this.maxDuration = defaults.maxDuration; + this.maxMessages = defaults.maxMessages; this.serviceAccountEmail = defaults.serviceAccountEmail; this.state = defaults.state; } @@ -226,6 +240,14 @@ public Builder maxDuration(String maxDuration) { return this; } @CustomType.Setter + public Builder maxMessages(Integer maxMessages) { + if (maxMessages == null) { + throw new MissingRequiredPropertyException("GetSubscriptionCloudStorageConfig", "maxMessages"); + } + this.maxMessages = maxMessages; + return this; + } + @CustomType.Setter public Builder serviceAccountEmail(String serviceAccountEmail) { if (serviceAccountEmail == null) { throw new MissingRequiredPropertyException("GetSubscriptionCloudStorageConfig", "serviceAccountEmail"); @@ -250,6 +272,7 @@ public GetSubscriptionCloudStorageConfig build() { _resultValue.filenameSuffix = filenameSuffix; _resultValue.maxBytes = maxBytes; _resultValue.maxDuration = maxDuration; + _resultValue.maxMessages = maxMessages; _resultValue.serviceAccountEmail = serviceAccountEmail; _resultValue.state = state; return _resultValue; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/GetSubscriptionCloudStorageConfigAvroConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/GetSubscriptionCloudStorageConfigAvroConfig.java index f043dae5ca..af21f62e98 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/GetSubscriptionCloudStorageConfigAvroConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/GetSubscriptionCloudStorageConfigAvroConfig.java @@ -10,6 +10,11 @@ @CustomType public final class GetSubscriptionCloudStorageConfigAvroConfig { + /** + * @return When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + * + */ + private Boolean useTopicSchema; /** * @return When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. * @@ -17,6 +22,13 @@ public final class GetSubscriptionCloudStorageConfigAvroConfig { private Boolean writeMetadata; private GetSubscriptionCloudStorageConfigAvroConfig() {} + /** + * @return When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + * + */ + public Boolean useTopicSchema() { + return this.useTopicSchema; + } /** * @return When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. 
* @@ -34,13 +46,23 @@ public static Builder builder(GetSubscriptionCloudStorageConfigAvroConfig defaul } @CustomType.Builder public static final class Builder { + private Boolean useTopicSchema; private Boolean writeMetadata; public Builder() {} public Builder(GetSubscriptionCloudStorageConfigAvroConfig defaults) { Objects.requireNonNull(defaults); + this.useTopicSchema = defaults.useTopicSchema; this.writeMetadata = defaults.writeMetadata; } + @CustomType.Setter + public Builder useTopicSchema(Boolean useTopicSchema) { + if (useTopicSchema == null) { + throw new MissingRequiredPropertyException("GetSubscriptionCloudStorageConfigAvroConfig", "useTopicSchema"); + } + this.useTopicSchema = useTopicSchema; + return this; + } @CustomType.Setter public Builder writeMetadata(Boolean writeMetadata) { if (writeMetadata == null) { @@ -51,6 +73,7 @@ public Builder writeMetadata(Boolean writeMetadata) { } public GetSubscriptionCloudStorageConfigAvroConfig build() { final var _resultValue = new GetSubscriptionCloudStorageConfigAvroConfig(); + _resultValue.useTopicSchema = useTopicSchema; _resultValue.writeMetadata = writeMetadata; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/SubscriptionCloudStorageConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/SubscriptionCloudStorageConfig.java index e5679c3513..a17a06e8bc 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/SubscriptionCloudStorageConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/SubscriptionCloudStorageConfig.java @@ -53,6 +53,11 @@ public final class SubscriptionCloudStorageConfig { * */ private @Nullable String maxDuration; + /** + * @return The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + * + */ + private @Nullable Integer maxMessages; /** * @return The service account to use to write to Cloud Storage. If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), @@ -121,6 +126,13 @@ public Optional maxBytes() { public Optional maxDuration() { return Optional.ofNullable(this.maxDuration); } + /** + * @return The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + * + */ + public Optional maxMessages() { + return Optional.ofNullable(this.maxMessages); + } /** * @return The service account to use to write to Cloud Storage. 
If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), @@ -155,6 +167,7 @@ public static final class Builder { private @Nullable String filenameSuffix; private @Nullable Integer maxBytes; private @Nullable String maxDuration; + private @Nullable Integer maxMessages; private @Nullable String serviceAccountEmail; private @Nullable String state; public Builder() {} @@ -167,6 +180,7 @@ public Builder(SubscriptionCloudStorageConfig defaults) { this.filenameSuffix = defaults.filenameSuffix; this.maxBytes = defaults.maxBytes; this.maxDuration = defaults.maxDuration; + this.maxMessages = defaults.maxMessages; this.serviceAccountEmail = defaults.serviceAccountEmail; this.state = defaults.state; } @@ -216,6 +230,12 @@ public Builder maxDuration(@Nullable String maxDuration) { return this; } @CustomType.Setter + public Builder maxMessages(@Nullable Integer maxMessages) { + + this.maxMessages = maxMessages; + return this; + } + @CustomType.Setter public Builder serviceAccountEmail(@Nullable String serviceAccountEmail) { this.serviceAccountEmail = serviceAccountEmail; @@ -236,6 +256,7 @@ public SubscriptionCloudStorageConfig build() { _resultValue.filenameSuffix = filenameSuffix; _resultValue.maxBytes = maxBytes; _resultValue.maxDuration = maxDuration; + _resultValue.maxMessages = maxMessages; _resultValue.serviceAccountEmail = serviceAccountEmail; _resultValue.state = state; return _resultValue; diff --git a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/SubscriptionCloudStorageConfigAvroConfig.java b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/SubscriptionCloudStorageConfigAvroConfig.java index a1b283f25e..a806ac2634 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/SubscriptionCloudStorageConfigAvroConfig.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/pubsub/outputs/SubscriptionCloudStorageConfigAvroConfig.java @@ -11,6 +11,11 @@ @CustomType public final class SubscriptionCloudStorageConfigAvroConfig { + /** + * @return When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + * + */ + private @Nullable Boolean useTopicSchema; /** * @return When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. * @@ -18,6 +23,13 @@ public final class SubscriptionCloudStorageConfigAvroConfig { private @Nullable Boolean writeMetadata; private SubscriptionCloudStorageConfigAvroConfig() {} + /** + * @return When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + * + */ + public Optional useTopicSchema() { + return Optional.ofNullable(this.useTopicSchema); + } /** * @return When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. 
* @@ -35,13 +47,21 @@ public static Builder builder(SubscriptionCloudStorageConfigAvroConfig defaults) } @CustomType.Builder public static final class Builder { + private @Nullable Boolean useTopicSchema; private @Nullable Boolean writeMetadata; public Builder() {} public Builder(SubscriptionCloudStorageConfigAvroConfig defaults) { Objects.requireNonNull(defaults); + this.useTopicSchema = defaults.useTopicSchema; this.writeMetadata = defaults.writeMetadata; } + @CustomType.Setter + public Builder useTopicSchema(@Nullable Boolean useTopicSchema) { + + this.useTopicSchema = useTopicSchema; + return this; + } @CustomType.Setter public Builder writeMetadata(@Nullable Boolean writeMetadata) { @@ -50,6 +70,7 @@ public Builder writeMetadata(@Nullable Boolean writeMetadata) { } public SubscriptionCloudStorageConfigAvroConfig build() { final var _resultValue = new SubscriptionCloudStorageConfigAvroConfig(); + _resultValue.useTopicSchema = useTopicSchema; _resultValue.writeMetadata = writeMetadata; return _resultValue; } diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/Cluster.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/Cluster.java index 60a7f7eb67..9793dc4a99 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/redis/Cluster.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/Cluster.java @@ -11,6 +11,8 @@ import com.pulumi.gcp.redis.ClusterArgs; import com.pulumi.gcp.redis.inputs.ClusterState; import com.pulumi.gcp.redis.outputs.ClusterDiscoveryEndpoint; +import com.pulumi.gcp.redis.outputs.ClusterMaintenancePolicy; +import com.pulumi.gcp.redis.outputs.ClusterMaintenanceSchedule; import com.pulumi.gcp.redis.outputs.ClusterPscConfig; import com.pulumi.gcp.redis.outputs.ClusterPscConnection; import com.pulumi.gcp.redis.outputs.ClusterStateInfo; @@ -56,6 +58,7 @@ * import com.pulumi.gcp.redis.ClusterArgs; * import com.pulumi.gcp.redis.inputs.ClusterPscConfigArgs; * import com.pulumi.gcp.redis.inputs.ClusterZoneDistributionConfigArgs; + * import com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyArgs; * import com.pulumi.resources.CustomResourceOptions; * import java.util.List; * import java.util.ArrayList; @@ -109,6 +112,17 @@ * .zoneDistributionConfig(ClusterZoneDistributionConfigArgs.builder() * .mode("MULTI_ZONE") * .build()) + * .maintenancePolicy(ClusterMaintenancePolicyArgs.builder() + * .weeklyMaintenanceWindows(ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.builder() + * .day("MONDAY") + * .startTime(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.builder() + * .hours(1) + * .minutes(0) + * .seconds(0) + * .nanos(0) + * .build()) + * .build()) + * .build()) * .build(), CustomResourceOptions.builder() * .dependsOn(default_) * .build()); @@ -139,6 +153,7 @@ * import com.pulumi.gcp.redis.ClusterArgs; * import com.pulumi.gcp.redis.inputs.ClusterPscConfigArgs; * import com.pulumi.gcp.redis.inputs.ClusterZoneDistributionConfigArgs; + * import com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyArgs; * import com.pulumi.resources.CustomResourceOptions; * import java.util.List; * import java.util.ArrayList; @@ -187,6 +202,17 @@ * .mode("SINGLE_ZONE") * .zone("us-central1-f") * .build()) + * .maintenancePolicy(ClusterMaintenancePolicyArgs.builder() + * .weeklyMaintenanceWindows(ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.builder() + * .day("MONDAY") + * .startTime(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.builder() + * .hours(1) + * .minutes(0) + * .seconds(0) + * .nanos(0) + * .build()) + * .build()) + * .build()) * 
.deletionProtectionEnabled(true) * .build(), CustomResourceOptions.builder() * .dependsOn(default_) @@ -303,6 +329,36 @@ public Output> deletionProtectionEnabled() { public Output> discoveryEndpoints() { return this.discoveryEndpoints; } + /** + * Maintenance policy for a cluster + * + */ + @Export(name="maintenancePolicy", refs={ClusterMaintenancePolicy.class}, tree="[0]") + private Output maintenancePolicy; + + /** + * @return Maintenance policy for a cluster + * + */ + public Output> maintenancePolicy() { + return Codegen.optional(this.maintenancePolicy); + } + /** + * Upcoming maintenance schedule. + * Structure is documented below. + * + */ + @Export(name="maintenanceSchedules", refs={List.class,ClusterMaintenanceSchedule.class}, tree="[0,1]") + private Output> maintenanceSchedules; + + /** + * @return Upcoming maintenance schedule. + * Structure is documented below. + * + */ + public Output> maintenanceSchedules() { + return this.maintenanceSchedules; + } /** * Unique name of the resource in this scope including project and location using the form: * projects/{projectId}/locations/{locationId}/clusters/{clusterId} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/ClusterArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/ClusterArgs.java index c324cf1f58..63a1e4d61b 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/redis/ClusterArgs.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/ClusterArgs.java @@ -6,6 +6,7 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyArgs; import com.pulumi.gcp.redis.inputs.ClusterPscConfigArgs; import com.pulumi.gcp.redis.inputs.ClusterZoneDistributionConfigArgs; import java.lang.Boolean; @@ -58,6 +59,21 @@ public Optional> deletionProtectionEnabled() { return Optional.ofNullable(this.deletionProtectionEnabled); } + /** + * Maintenance policy for a cluster + * + */ + @Import(name="maintenancePolicy") + private @Nullable Output maintenancePolicy; + + /** + * @return Maintenance policy for a cluster + * + */ + public Optional> maintenancePolicy() { + return Optional.ofNullable(this.maintenancePolicy); + } + /** * Unique name of the resource in this scope including project and location using the form: * projects/{projectId}/locations/{locationId}/clusters/{clusterId} @@ -223,6 +239,7 @@ private ClusterArgs() {} private ClusterArgs(ClusterArgs $) { this.authorizationMode = $.authorizationMode; this.deletionProtectionEnabled = $.deletionProtectionEnabled; + this.maintenancePolicy = $.maintenancePolicy; this.name = $.name; this.nodeType = $.nodeType; this.project = $.project; @@ -301,6 +318,27 @@ public Builder deletionProtectionEnabled(Boolean deletionProtectionEnabled) { return deletionProtectionEnabled(Output.of(deletionProtectionEnabled)); } + /** + * @param maintenancePolicy Maintenance policy for a cluster + * + * @return builder + * + */ + public Builder maintenancePolicy(@Nullable Output maintenancePolicy) { + $.maintenancePolicy = maintenancePolicy; + return this; + } + + /** + * @param maintenancePolicy Maintenance policy for a cluster + * + * @return builder + * + */ + public Builder maintenancePolicy(ClusterMaintenancePolicyArgs maintenancePolicy) { + return maintenancePolicy(Output.of(maintenancePolicy)); + } + /** * @param name Unique name of the resource in this scope including project and location using the form: * projects/{projectId}/locations/{locationId}/clusters/{clusterId} 
diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyArgs.java new file mode 100644 index 0000000000..5d0ae55f22 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyArgs.java @@ -0,0 +1,208 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.redis.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ClusterMaintenancePolicyArgs extends com.pulumi.resources.ResourceArgs { + + public static final ClusterMaintenancePolicyArgs Empty = new ClusterMaintenancePolicyArgs(); + + /** + * (Output) + * Output only. The time when the policy was created. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + @Import(name="createTime") + private @Nullable Output createTime; + + /** + * @return (Output) + * Output only. The time when the policy was created. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional> createTime() { + return Optional.ofNullable(this.createTime); + } + + /** + * (Output) + * Output only. The time when the policy was last updated. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + @Import(name="updateTime") + private @Nullable Output updateTime; + + /** + * @return (Output) + * Output only. The time when the policy was last updated. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional> updateTime() { + return Optional.ofNullable(this.updateTime); + } + + /** + * Optional. Maintenance window that is applied to resources covered by this policy. + * Minimum 1. For the current version, the maximum number + * of weekly_window is expected to be one. + * Structure is documented below. + * + */ + @Import(name="weeklyMaintenanceWindows") + private @Nullable Output> weeklyMaintenanceWindows; + + /** + * @return Optional. Maintenance window that is applied to resources covered by this policy. + * Minimum 1. For the current version, the maximum number + * of weekly_window is expected to be one. + * Structure is documented below. 
+ * + */ + public Optional>> weeklyMaintenanceWindows() { + return Optional.ofNullable(this.weeklyMaintenanceWindows); + } + + private ClusterMaintenancePolicyArgs() {} + + private ClusterMaintenancePolicyArgs(ClusterMaintenancePolicyArgs $) { + this.createTime = $.createTime; + this.updateTime = $.updateTime; + this.weeklyMaintenanceWindows = $.weeklyMaintenanceWindows; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ClusterMaintenancePolicyArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ClusterMaintenancePolicyArgs $; + + public Builder() { + $ = new ClusterMaintenancePolicyArgs(); + } + + public Builder(ClusterMaintenancePolicyArgs defaults) { + $ = new ClusterMaintenancePolicyArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param createTime (Output) + * Output only. The time when the policy was created. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + * @return builder + * + */ + public Builder createTime(@Nullable Output createTime) { + $.createTime = createTime; + return this; + } + + /** + * @param createTime (Output) + * Output only. The time when the policy was created. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + * @return builder + * + */ + public Builder createTime(String createTime) { + return createTime(Output.of(createTime)); + } + + /** + * @param updateTime (Output) + * Output only. The time when the policy was last updated. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + * @return builder + * + */ + public Builder updateTime(@Nullable Output updateTime) { + $.updateTime = updateTime; + return this; + } + + /** + * @param updateTime (Output) + * Output only. The time when the policy was last updated. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + * @return builder + * + */ + public Builder updateTime(String updateTime) { + return updateTime(Output.of(updateTime)); + } + + /** + * @param weeklyMaintenanceWindows Optional. Maintenance window that is applied to resources covered by this policy. + * Minimum 1. For the current version, the maximum number + * of weekly_window is expected to be one. + * Structure is documented below. + * + * @return builder + * + */ + public Builder weeklyMaintenanceWindows(@Nullable Output> weeklyMaintenanceWindows) { + $.weeklyMaintenanceWindows = weeklyMaintenanceWindows; + return this; + } + + /** + * @param weeklyMaintenanceWindows Optional. Maintenance window that is applied to resources covered by this policy. + * Minimum 1. For the current version, the maximum number + * of weekly_window is expected to be one. + * Structure is documented below. + * + * @return builder + * + */ + public Builder weeklyMaintenanceWindows(List weeklyMaintenanceWindows) { + return weeklyMaintenanceWindows(Output.of(weeklyMaintenanceWindows)); + } + + /** + * @param weeklyMaintenanceWindows Optional. Maintenance window that is applied to resources covered by this policy. + * Minimum 1. For the current version, the maximum number + * of weekly_window is expected to be one. + * Structure is documented below. + * + * @return builder + * + */ + public Builder weeklyMaintenanceWindows(ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs... 
weeklyMaintenanceWindows) { + return weeklyMaintenanceWindows(List.of(weeklyMaintenanceWindows)); + } + + public ClusterMaintenancePolicyArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.java new file mode 100644 index 0000000000..322cd2569f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.java @@ -0,0 +1,221 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.redis.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs extends com.pulumi.resources.ResourceArgs { + + public static final ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs Empty = new ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs(); + + /** + * Required. The day of week that maintenance updates occur. + * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + * - MONDAY: Monday + * - TUESDAY: Tuesday + * - WEDNESDAY: Wednesday + * - THURSDAY: Thursday + * - FRIDAY: Friday + * - SATURDAY: Saturday + * - SUNDAY: Sunday + * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + * + */ + @Import(name="day", required=true) + private Output day; + + /** + * @return Required. The day of week that maintenance updates occur. + * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + * - MONDAY: Monday + * - TUESDAY: Tuesday + * - WEDNESDAY: Wednesday + * - THURSDAY: Thursday + * - FRIDAY: Friday + * - SATURDAY: Saturday + * - SUNDAY: Sunday + * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + * + */ + public Output day() { + return this.day; + } + + /** + * (Output) + * Output only. Duration of the maintenance window. + * The current window is fixed at 1 hour. + * A duration in seconds with up to nine fractional digits, + * terminated by 's'. Example: "3.5s". + * + */ + @Import(name="duration") + private @Nullable Output duration; + + /** + * @return (Output) + * Output only. Duration of the maintenance window. + * The current window is fixed at 1 hour. + * A duration in seconds with up to nine fractional digits, + * terminated by 's'. Example: "3.5s". + * + */ + public Optional> duration() { + return Optional.ofNullable(this.duration); + } + + /** + * Required. Start time of the window in UTC time. + * Structure is documented below. + * + */ + @Import(name="startTime", required=true) + private Output startTime; + + /** + * @return Required. Start time of the window in UTC time. + * Structure is documented below. 
+ * + */ + public Output startTime() { + return this.startTime; + } + + private ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs() {} + + private ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs(ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs $) { + this.day = $.day; + this.duration = $.duration; + this.startTime = $.startTime; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs $; + + public Builder() { + $ = new ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs(); + } + + public Builder(ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs defaults) { + $ = new ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param day Required. The day of week that maintenance updates occur. + * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + * - MONDAY: Monday + * - TUESDAY: Tuesday + * - WEDNESDAY: Wednesday + * - THURSDAY: Thursday + * - FRIDAY: Friday + * - SATURDAY: Saturday + * - SUNDAY: Sunday + * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + * + * @return builder + * + */ + public Builder day(Output day) { + $.day = day; + return this; + } + + /** + * @param day Required. The day of week that maintenance updates occur. + * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + * - MONDAY: Monday + * - TUESDAY: Tuesday + * - WEDNESDAY: Wednesday + * - THURSDAY: Thursday + * - FRIDAY: Friday + * - SATURDAY: Saturday + * - SUNDAY: Sunday + * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + * + * @return builder + * + */ + public Builder day(String day) { + return day(Output.of(day)); + } + + /** + * @param duration (Output) + * Output only. Duration of the maintenance window. + * The current window is fixed at 1 hour. + * A duration in seconds with up to nine fractional digits, + * terminated by 's'. Example: "3.5s". + * + * @return builder + * + */ + public Builder duration(@Nullable Output duration) { + $.duration = duration; + return this; + } + + /** + * @param duration (Output) + * Output only. Duration of the maintenance window. + * The current window is fixed at 1 hour. + * A duration in seconds with up to nine fractional digits, + * terminated by 's'. Example: "3.5s". + * + * @return builder + * + */ + public Builder duration(String duration) { + return duration(Output.of(duration)); + } + + /** + * @param startTime Required. Start time of the window in UTC time. + * Structure is documented below. + * + * @return builder + * + */ + public Builder startTime(Output startTime) { + $.startTime = startTime; + return this; + } + + /** + * @param startTime Required. Start time of the window in UTC time. + * Structure is documented below. 
+ * + * @return builder + * + */ + public Builder startTime(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs startTime) { + return startTime(Output.of(startTime)); + } + + public ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs build() { + if ($.day == null) { + throw new MissingRequiredPropertyException("ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs", "day"); + } + if ($.startTime == null) { + throw new MissingRequiredPropertyException("ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs", "startTime"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.java new file mode 100644 index 0000000000..4547a20a1e --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.java @@ -0,0 +1,202 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.redis.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.Integer; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs extends com.pulumi.resources.ResourceArgs { + + public static final ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs Empty = new ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs(); + + /** + * Hours of day in 24 hour format. Should be from 0 to 23. + * An API may choose to allow the value "24:00:00" for scenarios like business closing time. + * + */ + @Import(name="hours") + private @Nullable Output hours; + + /** + * @return Hours of day in 24 hour format. Should be from 0 to 23. + * An API may choose to allow the value "24:00:00" for scenarios like business closing time. + * + */ + public Optional> hours() { + return Optional.ofNullable(this.hours); + } + + /** + * Minutes of hour of day. Must be from 0 to 59. + * + */ + @Import(name="minutes") + private @Nullable Output minutes; + + /** + * @return Minutes of hour of day. Must be from 0 to 59. + * + */ + public Optional> minutes() { + return Optional.ofNullable(this.minutes); + } + + /** + * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + * + */ + @Import(name="nanos") + private @Nullable Output nanos; + + /** + * @return Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + * + */ + public Optional> nanos() { + return Optional.ofNullable(this.nanos); + } + + /** + * Seconds of minutes of the time. Must normally be from 0 to 59. + * An API may allow the value 60 if it allows leap-seconds. + * + */ + @Import(name="seconds") + private @Nullable Output seconds; + + /** + * @return Seconds of minutes of the time. Must normally be from 0 to 59. + * An API may allow the value 60 if it allows leap-seconds. 
+ * + */ + public Optional> seconds() { + return Optional.ofNullable(this.seconds); + } + + private ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs() {} + + private ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs $) { + this.hours = $.hours; + this.minutes = $.minutes; + this.nanos = $.nanos; + this.seconds = $.seconds; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs $; + + public Builder() { + $ = new ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs(); + } + + public Builder(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs defaults) { + $ = new ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param hours Hours of day in 24 hour format. Should be from 0 to 23. + * An API may choose to allow the value "24:00:00" for scenarios like business closing time. + * + * @return builder + * + */ + public Builder hours(@Nullable Output hours) { + $.hours = hours; + return this; + } + + /** + * @param hours Hours of day in 24 hour format. Should be from 0 to 23. + * An API may choose to allow the value "24:00:00" for scenarios like business closing time. + * + * @return builder + * + */ + public Builder hours(Integer hours) { + return hours(Output.of(hours)); + } + + /** + * @param minutes Minutes of hour of day. Must be from 0 to 59. + * + * @return builder + * + */ + public Builder minutes(@Nullable Output minutes) { + $.minutes = minutes; + return this; + } + + /** + * @param minutes Minutes of hour of day. Must be from 0 to 59. + * + * @return builder + * + */ + public Builder minutes(Integer minutes) { + return minutes(Output.of(minutes)); + } + + /** + * @param nanos Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + * + * @return builder + * + */ + public Builder nanos(@Nullable Output nanos) { + $.nanos = nanos; + return this; + } + + /** + * @param nanos Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + * + * @return builder + * + */ + public Builder nanos(Integer nanos) { + return nanos(Output.of(nanos)); + } + + /** + * @param seconds Seconds of minutes of the time. Must normally be from 0 to 59. + * An API may allow the value 60 if it allows leap-seconds. + * + * @return builder + * + */ + public Builder seconds(@Nullable Output seconds) { + $.seconds = seconds; + return this; + } + + /** + * @param seconds Seconds of minutes of the time. Must normally be from 0 to 59. + * An API may allow the value 60 if it allows leap-seconds. + * + * @return builder + * + */ + public Builder seconds(Integer seconds) { + return seconds(Output.of(seconds)); + } + + public ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenanceScheduleArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenanceScheduleArgs.java new file mode 100644 index 0000000000..fa17b304d8 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterMaintenanceScheduleArgs.java @@ -0,0 +1,197 @@ +// *** WARNING: this file was generated by pulumi-java-gen. 
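The new `ClusterMaintenancePolicyArgs`, `ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs`, and `ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs` inputs compose through their builders. The generated javadoc does not yet show them in use, so the following is a minimal sketch only: the class and method names are hypothetical, the cluster's other required arguments are elided, and the resulting value would be passed to `ClusterArgs.builder()....maintenancePolicy(...)`.

```java
import com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyArgs;
import com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs;
import com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs;

// Hypothetical helper, not part of the generated SDK: builds a policy with a
// single weekly window on Mondays at 01:30 UTC. `day` and `startTime` are the
// only required fields; `duration` and the policy timestamps are output-only
// and are therefore omitted.
public final class MaintenancePolicyExample {
    public static ClusterMaintenancePolicyArgs mondayWindow() {
        return ClusterMaintenancePolicyArgs.builder()
            .weeklyMaintenanceWindows(ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs.builder()
                .day("MONDAY")
                .startTime(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs.builder()
                    .hours(1)
                    .minutes(30)
                    .build())
                .build())
            .build();
    }
}
```

The varargs `weeklyMaintenanceWindows(...)` overload is used here because, per the field documentation above, the current API version expects at most one weekly window.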
*** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.redis.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class ClusterMaintenanceScheduleArgs extends com.pulumi.resources.ResourceArgs { + + public static final ClusterMaintenanceScheduleArgs Empty = new ClusterMaintenanceScheduleArgs(); + + /** + * (Output) + * Output only. The end time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + @Import(name="endTime") + private @Nullable Output endTime; + + /** + * @return (Output) + * Output only. The end time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional> endTime() { + return Optional.ofNullable(this.endTime); + } + + /** + * (Output) + * Output only. The deadline that the maintenance schedule start time + * can not go beyond, including reschedule. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + @Import(name="scheduleDeadlineTime") + private @Nullable Output scheduleDeadlineTime; + + /** + * @return (Output) + * Output only. The deadline that the maintenance schedule start time + * can not go beyond, including reschedule. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional> scheduleDeadlineTime() { + return Optional.ofNullable(this.scheduleDeadlineTime); + } + + /** + * (Output) + * Output only. The start time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + @Import(name="startTime") + private @Nullable Output startTime; + + /** + * @return (Output) + * Output only. The start time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional> startTime() { + return Optional.ofNullable(this.startTime); + } + + private ClusterMaintenanceScheduleArgs() {} + + private ClusterMaintenanceScheduleArgs(ClusterMaintenanceScheduleArgs $) { + this.endTime = $.endTime; + this.scheduleDeadlineTime = $.scheduleDeadlineTime; + this.startTime = $.startTime; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(ClusterMaintenanceScheduleArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private ClusterMaintenanceScheduleArgs $; + + public Builder() { + $ = new ClusterMaintenanceScheduleArgs(); + } + + public Builder(ClusterMaintenanceScheduleArgs defaults) { + $ = new ClusterMaintenanceScheduleArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param endTime (Output) + * Output only. The end time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. 
+ * + * @return builder + * + */ + public Builder endTime(@Nullable Output endTime) { + $.endTime = endTime; + return this; + } + + /** + * @param endTime (Output) + * Output only. The end time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + * @return builder + * + */ + public Builder endTime(String endTime) { + return endTime(Output.of(endTime)); + } + + /** + * @param scheduleDeadlineTime (Output) + * Output only. The deadline that the maintenance schedule start time + * can not go beyond, including reschedule. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + * @return builder + * + */ + public Builder scheduleDeadlineTime(@Nullable Output scheduleDeadlineTime) { + $.scheduleDeadlineTime = scheduleDeadlineTime; + return this; + } + + /** + * @param scheduleDeadlineTime (Output) + * Output only. The deadline that the maintenance schedule start time + * can not go beyond, including reschedule. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + * @return builder + * + */ + public Builder scheduleDeadlineTime(String scheduleDeadlineTime) { + return scheduleDeadlineTime(Output.of(scheduleDeadlineTime)); + } + + /** + * @param startTime (Output) + * Output only. The start time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + * @return builder + * + */ + public Builder startTime(@Nullable Output startTime) { + $.startTime = startTime; + return this; + } + + /** + * @param startTime (Output) + * Output only. The start time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + * @return builder + * + */ + public Builder startTime(String startTime) { + return startTime(Output.of(startTime)); + } + + public ClusterMaintenanceScheduleArgs build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterState.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterState.java index d37c187128..b43caddc4f 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterState.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/inputs/ClusterState.java @@ -6,6 +6,8 @@ import com.pulumi.core.Output; import com.pulumi.core.annotations.Import; import com.pulumi.gcp.redis.inputs.ClusterDiscoveryEndpointArgs; +import com.pulumi.gcp.redis.inputs.ClusterMaintenancePolicyArgs; +import com.pulumi.gcp.redis.inputs.ClusterMaintenanceScheduleArgs; import com.pulumi.gcp.redis.inputs.ClusterPscConfigArgs; import com.pulumi.gcp.redis.inputs.ClusterPscConnectionArgs; import com.pulumi.gcp.redis.inputs.ClusterStateInfoArgs; @@ -101,6 +103,38 @@ public Optional>> discoveryEndpoints() return Optional.ofNullable(this.discoveryEndpoints); } + /** + * Maintenance policy for a cluster + * + */ + @Import(name="maintenancePolicy") + private @Nullable Output maintenancePolicy; + + /** + * @return Maintenance policy for a cluster + * + */ + public Optional> maintenancePolicy() { + return Optional.ofNullable(this.maintenancePolicy); + } + + /** + * Upcoming maintenance schedule. + * Structure is documented below. 
+ * + */ + @Import(name="maintenanceSchedules") + private @Nullable Output> maintenanceSchedules; + + /** + * @return Upcoming maintenance schedule. + * Structure is documented below. + * + */ + public Optional>> maintenanceSchedules() { + return Optional.ofNullable(this.maintenanceSchedules); + } + /** * Unique name of the resource in this scope including project and location using the form: * projects/{projectId}/locations/{locationId}/clusters/{clusterId} @@ -362,6 +396,8 @@ private ClusterState(ClusterState $) { this.createTime = $.createTime; this.deletionProtectionEnabled = $.deletionProtectionEnabled; this.discoveryEndpoints = $.discoveryEndpoints; + this.maintenancePolicy = $.maintenancePolicy; + this.maintenanceSchedules = $.maintenanceSchedules; this.name = $.name; this.nodeType = $.nodeType; this.preciseSizeGb = $.preciseSizeGb; @@ -511,6 +547,61 @@ public Builder discoveryEndpoints(ClusterDiscoveryEndpointArgs... discoveryEndpo return discoveryEndpoints(List.of(discoveryEndpoints)); } + /** + * @param maintenancePolicy Maintenance policy for a cluster + * + * @return builder + * + */ + public Builder maintenancePolicy(@Nullable Output maintenancePolicy) { + $.maintenancePolicy = maintenancePolicy; + return this; + } + + /** + * @param maintenancePolicy Maintenance policy for a cluster + * + * @return builder + * + */ + public Builder maintenancePolicy(ClusterMaintenancePolicyArgs maintenancePolicy) { + return maintenancePolicy(Output.of(maintenancePolicy)); + } + + /** + * @param maintenanceSchedules Upcoming maintenance schedule. + * Structure is documented below. + * + * @return builder + * + */ + public Builder maintenanceSchedules(@Nullable Output> maintenanceSchedules) { + $.maintenanceSchedules = maintenanceSchedules; + return this; + } + + /** + * @param maintenanceSchedules Upcoming maintenance schedule. + * Structure is documented below. + * + * @return builder + * + */ + public Builder maintenanceSchedules(List maintenanceSchedules) { + return maintenanceSchedules(Output.of(maintenanceSchedules)); + } + + /** + * @param maintenanceSchedules Upcoming maintenance schedule. + * Structure is documented below. + * + * @return builder + * + */ + public Builder maintenanceSchedules(ClusterMaintenanceScheduleArgs... maintenanceSchedules) { + return maintenanceSchedules(List.of(maintenanceSchedules)); + } + /** * @param name Unique name of the resource in this scope including project and location using the form: * projects/{projectId}/locations/{locationId}/clusters/{clusterId} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicy.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicy.java new file mode 100644 index 0000000000..0b6e5ea952 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicy.java @@ -0,0 +1,122 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.redis.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.gcp.redis.outputs.ClusterMaintenancePolicyWeeklyMaintenanceWindow; +import java.lang.String; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ClusterMaintenancePolicy { + /** + * @return (Output) + * Output only. The time when the policy was created. 
+ * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + private @Nullable String createTime; + /** + * @return (Output) + * Output only. The time when the policy was last updated. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + private @Nullable String updateTime; + /** + * @return Optional. Maintenance window that is applied to resources covered by this policy. + * Minimum 1. For the current version, the maximum number + * of weekly_window is expected to be one. + * Structure is documented below. + * + */ + private @Nullable List weeklyMaintenanceWindows; + + private ClusterMaintenancePolicy() {} + /** + * @return (Output) + * Output only. The time when the policy was created. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional createTime() { + return Optional.ofNullable(this.createTime); + } + /** + * @return (Output) + * Output only. The time when the policy was last updated. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional updateTime() { + return Optional.ofNullable(this.updateTime); + } + /** + * @return Optional. Maintenance window that is applied to resources covered by this policy. + * Minimum 1. For the current version, the maximum number + * of weekly_window is expected to be one. + * Structure is documented below. + * + */ + public List weeklyMaintenanceWindows() { + return this.weeklyMaintenanceWindows == null ? List.of() : this.weeklyMaintenanceWindows; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ClusterMaintenancePolicy defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String createTime; + private @Nullable String updateTime; + private @Nullable List weeklyMaintenanceWindows; + public Builder() {} + public Builder(ClusterMaintenancePolicy defaults) { + Objects.requireNonNull(defaults); + this.createTime = defaults.createTime; + this.updateTime = defaults.updateTime; + this.weeklyMaintenanceWindows = defaults.weeklyMaintenanceWindows; + } + + @CustomType.Setter + public Builder createTime(@Nullable String createTime) { + + this.createTime = createTime; + return this; + } + @CustomType.Setter + public Builder updateTime(@Nullable String updateTime) { + + this.updateTime = updateTime; + return this; + } + @CustomType.Setter + public Builder weeklyMaintenanceWindows(@Nullable List weeklyMaintenanceWindows) { + + this.weeklyMaintenanceWindows = weeklyMaintenanceWindows; + return this; + } + public Builder weeklyMaintenanceWindows(ClusterMaintenancePolicyWeeklyMaintenanceWindow... 
weeklyMaintenanceWindows) { + return weeklyMaintenanceWindows(List.of(weeklyMaintenanceWindows)); + } + public ClusterMaintenancePolicy build() { + final var _resultValue = new ClusterMaintenancePolicy(); + _resultValue.createTime = createTime; + _resultValue.updateTime = updateTime; + _resultValue.weeklyMaintenanceWindows = weeklyMaintenanceWindows; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindow.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindow.java new file mode 100644 index 0000000000..35bdaebb6a --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindow.java @@ -0,0 +1,133 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.redis.outputs; + +import com.pulumi.core.annotations.CustomType; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import com.pulumi.gcp.redis.outputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ClusterMaintenancePolicyWeeklyMaintenanceWindow { + /** + * @return Required. The day of week that maintenance updates occur. + * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + * - MONDAY: Monday + * - TUESDAY: Tuesday + * - WEDNESDAY: Wednesday + * - THURSDAY: Thursday + * - FRIDAY: Friday + * - SATURDAY: Saturday + * - SUNDAY: Sunday + * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + * + */ + private String day; + /** + * @return (Output) + * Output only. Duration of the maintenance window. + * The current window is fixed at 1 hour. + * A duration in seconds with up to nine fractional digits, + * terminated by 's'. Example: "3.5s". + * + */ + private @Nullable String duration; + /** + * @return Required. Start time of the window in UTC time. + * Structure is documented below. + * + */ + private ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime startTime; + + private ClusterMaintenancePolicyWeeklyMaintenanceWindow() {} + /** + * @return Required. The day of week that maintenance updates occur. + * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + * - MONDAY: Monday + * - TUESDAY: Tuesday + * - WEDNESDAY: Wednesday + * - THURSDAY: Thursday + * - FRIDAY: Friday + * - SATURDAY: Saturday + * - SUNDAY: Sunday + * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + * + */ + public String day() { + return this.day; + } + /** + * @return (Output) + * Output only. Duration of the maintenance window. + * The current window is fixed at 1 hour. + * A duration in seconds with up to nine fractional digits, + * terminated by 's'. Example: "3.5s". + * + */ + public Optional duration() { + return Optional.ofNullable(this.duration); + } + /** + * @return Required. Start time of the window in UTC time. + * Structure is documented below. 
+ * + */ + public ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime startTime() { + return this.startTime; + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ClusterMaintenancePolicyWeeklyMaintenanceWindow defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private String day; + private @Nullable String duration; + private ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime startTime; + public Builder() {} + public Builder(ClusterMaintenancePolicyWeeklyMaintenanceWindow defaults) { + Objects.requireNonNull(defaults); + this.day = defaults.day; + this.duration = defaults.duration; + this.startTime = defaults.startTime; + } + + @CustomType.Setter + public Builder day(String day) { + if (day == null) { + throw new MissingRequiredPropertyException("ClusterMaintenancePolicyWeeklyMaintenanceWindow", "day"); + } + this.day = day; + return this; + } + @CustomType.Setter + public Builder duration(@Nullable String duration) { + + this.duration = duration; + return this; + } + @CustomType.Setter + public Builder startTime(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime startTime) { + if (startTime == null) { + throw new MissingRequiredPropertyException("ClusterMaintenancePolicyWeeklyMaintenanceWindow", "startTime"); + } + this.startTime = startTime; + return this; + } + public ClusterMaintenancePolicyWeeklyMaintenanceWindow build() { + final var _resultValue = new ClusterMaintenancePolicyWeeklyMaintenanceWindow(); + _resultValue.day = day; + _resultValue.duration = duration; + _resultValue.startTime = startTime; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime.java new file mode 100644 index 0000000000..5ef667898f --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime.java @@ -0,0 +1,124 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.redis.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.Integer; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime { + /** + * @return Hours of day in 24 hour format. Should be from 0 to 23. + * An API may choose to allow the value "24:00:00" for scenarios like business closing time. + * + */ + private @Nullable Integer hours; + /** + * @return Minutes of hour of day. Must be from 0 to 59. + * + */ + private @Nullable Integer minutes; + /** + * @return Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + * + */ + private @Nullable Integer nanos; + /** + * @return Seconds of minutes of the time. Must normally be from 0 to 59. + * An API may allow the value 60 if it allows leap-seconds. + * + */ + private @Nullable Integer seconds; + + private ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime() {} + /** + * @return Hours of day in 24 hour format. Should be from 0 to 23. + * An API may choose to allow the value "24:00:00" for scenarios like business closing time. 
+ * + */ + public Optional hours() { + return Optional.ofNullable(this.hours); + } + /** + * @return Minutes of hour of day. Must be from 0 to 59. + * + */ + public Optional minutes() { + return Optional.ofNullable(this.minutes); + } + /** + * @return Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + * + */ + public Optional nanos() { + return Optional.ofNullable(this.nanos); + } + /** + * @return Seconds of minutes of the time. Must normally be from 0 to 59. + * An API may allow the value 60 if it allows leap-seconds. + * + */ + public Optional seconds() { + return Optional.ofNullable(this.seconds); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable Integer hours; + private @Nullable Integer minutes; + private @Nullable Integer nanos; + private @Nullable Integer seconds; + public Builder() {} + public Builder(ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime defaults) { + Objects.requireNonNull(defaults); + this.hours = defaults.hours; + this.minutes = defaults.minutes; + this.nanos = defaults.nanos; + this.seconds = defaults.seconds; + } + + @CustomType.Setter + public Builder hours(@Nullable Integer hours) { + + this.hours = hours; + return this; + } + @CustomType.Setter + public Builder minutes(@Nullable Integer minutes) { + + this.minutes = minutes; + return this; + } + @CustomType.Setter + public Builder nanos(@Nullable Integer nanos) { + + this.nanos = nanos; + return this; + } + @CustomType.Setter + public Builder seconds(@Nullable Integer seconds) { + + this.seconds = seconds; + return this; + } + public ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime build() { + final var _resultValue = new ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime(); + _resultValue.hours = hours; + _resultValue.minutes = minutes; + _resultValue.nanos = nanos; + _resultValue.seconds = seconds; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenanceSchedule.java b/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenanceSchedule.java new file mode 100644 index 0000000000..9ba36d3e30 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/redis/outputs/ClusterMaintenanceSchedule.java @@ -0,0 +1,119 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.redis.outputs; + +import com.pulumi.core.annotations.CustomType; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + +@CustomType +public final class ClusterMaintenanceSchedule { + /** + * @return (Output) + * Output only. The end time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + private @Nullable String endTime; + /** + * @return (Output) + * Output only. The deadline that the maintenance schedule start time + * can not go beyond, including reschedule. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + private @Nullable String scheduleDeadlineTime; + /** + * @return (Output) + * Output only. 
The start time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + private @Nullable String startTime; + + private ClusterMaintenanceSchedule() {} + /** + * @return (Output) + * Output only. The end time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional endTime() { + return Optional.ofNullable(this.endTime); + } + /** + * @return (Output) + * Output only. The deadline that the maintenance schedule start time + * can not go beyond, including reschedule. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional scheduleDeadlineTime() { + return Optional.ofNullable(this.scheduleDeadlineTime); + } + /** + * @return (Output) + * Output only. The start time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + * + */ + public Optional startTime() { + return Optional.ofNullable(this.startTime); + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ClusterMaintenanceSchedule defaults) { + return new Builder(defaults); + } + @CustomType.Builder + public static final class Builder { + private @Nullable String endTime; + private @Nullable String scheduleDeadlineTime; + private @Nullable String startTime; + public Builder() {} + public Builder(ClusterMaintenanceSchedule defaults) { + Objects.requireNonNull(defaults); + this.endTime = defaults.endTime; + this.scheduleDeadlineTime = defaults.scheduleDeadlineTime; + this.startTime = defaults.startTime; + } + + @CustomType.Setter + public Builder endTime(@Nullable String endTime) { + + this.endTime = endTime; + return this; + } + @CustomType.Setter + public Builder scheduleDeadlineTime(@Nullable String scheduleDeadlineTime) { + + this.scheduleDeadlineTime = scheduleDeadlineTime; + return this; + } + @CustomType.Setter + public Builder startTime(@Nullable String startTime) { + + this.startTime = startTime; + return this; + } + public ClusterMaintenanceSchedule build() { + final var _resultValue = new ClusterMaintenanceSchedule(); + _resultValue.endTime = endTime; + _resultValue.scheduleDeadlineTime = scheduleDeadlineTime; + _resultValue.startTime = startTime; + return _resultValue; + } + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2FolderSccBigQueryExport.java b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2FolderSccBigQueryExport.java new file mode 100644 index 0000000000..580c4e1833 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2FolderSccBigQueryExport.java @@ -0,0 +1,388 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
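While `maintenancePolicy` is an input, `maintenanceSchedules` is an output-only list populated by the service. As a hedged illustration (the helper name and placeholder strings are hypothetical, and the `cluster` argument is assumed to be a `gcp.redis.Cluster` created elsewhere in the program), the upcoming window's start time could be read back like this:

```java
import com.pulumi.core.Output;
import com.pulumi.gcp.redis.Cluster;

// Hypothetical helper, not part of the generated SDK: reads the output-only
// maintenance schedule and returns the start time of the next window, or a
// placeholder when the API has not published a schedule yet.
public final class MaintenanceScheduleExample {
    public static Output<String> nextMaintenanceStart(Cluster cluster) {
        return cluster.maintenanceSchedules().applyValue(schedules ->
            schedules.isEmpty()
                ? "none scheduled"
                : schedules.get(0).startTime().orElse("unknown"));
    }
}
```

The returned `Output<String>` could then be exported from the program's stack callback, for example with `ctx.export("nextMaintenanceStart", ...)`.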
*** + +package com.pulumi.gcp.securitycenter; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Export; +import com.pulumi.core.annotations.ResourceType; +import com.pulumi.core.internal.Codegen; +import com.pulumi.gcp.Utilities; +import com.pulumi.gcp.securitycenter.V2FolderSccBigQueryExportArgs; +import com.pulumi.gcp.securitycenter.inputs.V2FolderSccBigQueryExportState; +import java.lang.String; +import java.util.Optional; +import javax.annotation.Nullable; + +/** + * A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + * It represents exporting Security Command Center data, including assets, findings, and security marks + * using gcloud scc bqexports + * > **Note:** In order to use Cloud SCC resources, your organization must be enrolled + * in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + * Without doing so, you may run into errors during resource creation. + * + * To get more information about FolderSccBigQueryExport, see: + * + * * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports) + * * How-to Guides + * * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) + * + * ## Example Usage + * + * ### Scc V2 Folder Big Query Export Config Basic + * + * <!--Start PulumiCodeChooser --> + *
+ * {@code
+ * package generated_program;
+ * 
+ * import com.pulumi.Context;
+ * import com.pulumi.Pulumi;
+ * import com.pulumi.core.Output;
+ * import com.pulumi.gcp.organizations.Folder;
+ * import com.pulumi.gcp.organizations.FolderArgs;
+ * import com.pulumi.gcp.bigquery.Dataset;
+ * import com.pulumi.gcp.bigquery.DatasetArgs;
+ * import com.pulumi.gcp.securitycenter.V2FolderSccBigQueryExport;
+ * import com.pulumi.gcp.securitycenter.V2FolderSccBigQueryExportArgs;
+ * import java.util.List;
+ * import java.util.ArrayList;
+ * import java.util.Map;
+ * import java.io.File;
+ * import java.nio.file.Files;
+ * import java.nio.file.Paths;
+ * 
+ * public class App {
+ *     public static void main(String[] args) {
+ *         Pulumi.run(App::stack);
+ *     }
+ * 
+ *     public static void stack(Context ctx) {
+ *         var folder = new Folder("folder", FolderArgs.builder()
+ *             .parent("organizations/123456789")
+ *             .displayName("folder-name")
+ *             .deletionProtection(false)
+ *             .build());
+ * 
+ *         var default_ = new Dataset("default", DatasetArgs.builder()
+ *             .datasetId("my_dataset_id")
+ *             .friendlyName("test")
+ *             .description("This is a test description")
+ *             .location("US")
+ *             .defaultTableExpirationMs(3600000)
+ *             .defaultPartitionExpirationMs(null)
+ *             .labels(Map.of("env", "default"))
+ *             .build());
+ * 
+ *         var customBigQueryExportConfig = new V2FolderSccBigQueryExport("customBigQueryExportConfig", V2FolderSccBigQueryExportArgs.builder()
+ *             .bigQueryExportId("my-export")
+ *             .folder(folder.folderId())
+ *             .dataset(default_.id())
+ *             .location("global")
+ *             .description("Cloud Security Command Center Findings Big Query Export Config")
+ *             .filter("state=\"ACTIVE\" AND NOT mute=\"MUTED\"")
+ *             .build());
+ * 
+ *     }
+ * }
+ * }
+ * 
+ * <!--End PulumiCodeChooser --> + * + * ## Import + * + * FolderSccBigQueryExport can be imported using any of these accepted formats: + * + * * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` + * + * * `{{folder}}/{{location}}/{{big_query_export_id}}` + * + * When using the `pulumi import` command, FolderSccBigQueryExport can be imported using one of the formats above. For example: + * + * ```sh + * $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default {{folder}}/{{location}}/{{big_query_export_id}} + * ``` + * + */ +@ResourceType(type="gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport") +public class V2FolderSccBigQueryExport extends com.pulumi.resources.CustomResource { + /** + * This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + */ + @Export(name="bigQueryExportId", refs={String.class}, tree="[0]") + private Output bigQueryExportId; + + /** + * @return This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + */ + public Output bigQueryExportId() { + return this.bigQueryExportId; + } + /** + * The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + @Export(name="createTime", refs={String.class}, tree="[0]") + private Output createTime; + + /** + * @return The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + public Output createTime() { + return this.createTime; + } + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + @Export(name="dataset", refs={String.class}, tree="[0]") + private Output dataset; + + /** + * @return The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + public Output> dataset() { + return Codegen.optional(this.dataset); + } + /** + * The description of the notification config (max of 1024 characters). + * + */ + @Export(name="description", refs={String.class}, tree="[0]") + private Output description; + + /** + * @return The description of the notification config (max of 1024 characters). 
+ * + */ + public Output> description() { + return Codegen.optional(this.description); + } + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + @Export(name="filter", refs={String.class}, tree="[0]") + private Output filter; + + /** + * @return Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + public Output> filter() { + return Codegen.optional(this.filter); + } + /** + * The folder where Cloud Security Command Center Big Query Export + * Config lives in. + * + */ + @Export(name="folder", refs={String.class}, tree="[0]") + private Output folder; + + /** + * @return The folder where Cloud Security Command Center Big Query Export + * Config lives in. + * + */ + public Output folder() { + return this.folder; + } + /** + * The BigQuery export configuration is stored in this location. If not provided, Use global as default. + * + */ + @Export(name="location", refs={String.class}, tree="[0]") + private Output location; + + /** + * @return The BigQuery export configuration is stored in this location. If not provided, Use global as default. + * + */ + public Output> location() { + return Codegen.optional(this.location); + } + /** + * Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + * + */ + @Export(name="mostRecentEditor", refs={String.class}, tree="[0]") + private Output mostRecentEditor; + + /** + * @return Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. 
+ * + */ + public Output mostRecentEditor() { + return this.mostRecentEditor; + } + /** + * The resource name of this export, in the format + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + */ + @Export(name="name", refs={String.class}, tree="[0]") + private Output name; + + /** + * @return The resource name of this export, in the format + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + */ + public Output name() { + return this.name; + } + /** + * The service account that needs permission to create table and upload data to the BigQuery dataset. + * + */ + @Export(name="principal", refs={String.class}, tree="[0]") + private Output principal; + + /** + * @return The service account that needs permission to create table and upload data to the BigQuery dataset. + * + */ + public Output principal() { + return this.principal; + } + /** + * The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + @Export(name="updateTime", refs={String.class}, tree="[0]") + private Output updateTime; + + /** + * @return The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + public Output updateTime() { + return this.updateTime; + } + + /** + * + * @param name The _unique_ name of the resulting resource. + */ + public V2FolderSccBigQueryExport(java.lang.String name) { + this(name, V2FolderSccBigQueryExportArgs.Empty); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + */ + public V2FolderSccBigQueryExport(java.lang.String name, V2FolderSccBigQueryExportArgs args) { + this(name, args, null); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + * @param options A bag of options that control this resource's behavior. 
+ */ + public V2FolderSccBigQueryExport(java.lang.String name, V2FolderSccBigQueryExportArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport", name, makeArgs(args, options), makeResourceOptions(options, Codegen.empty()), false); + } + + private V2FolderSccBigQueryExport(java.lang.String name, Output id, @Nullable V2FolderSccBigQueryExportState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport", name, state, makeResourceOptions(options, id), false); + } + + private static V2FolderSccBigQueryExportArgs makeArgs(V2FolderSccBigQueryExportArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + if (options != null && options.getUrn().isPresent()) { + return null; + } + return args == null ? V2FolderSccBigQueryExportArgs.Empty : args; + } + + private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) { + var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder() + .version(Utilities.getVersion()) + .build(); + return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id); + } + + /** + * Get an existing Host resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state + * @param options Optional settings to control the behavior of the CustomResource. + */ + public static V2FolderSccBigQueryExport get(java.lang.String name, Output id, @Nullable V2FolderSccBigQueryExportState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + return new V2FolderSccBigQueryExport(name, id, state, options); + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2FolderSccBigQueryExportArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2FolderSccBigQueryExportArgs.java new file mode 100644 index 0000000000..0c904d3552 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2FolderSccBigQueryExportArgs.java @@ -0,0 +1,375 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.securitycenter; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class V2FolderSccBigQueryExportArgs extends com.pulumi.resources.ResourceArgs { + + public static final V2FolderSccBigQueryExportArgs Empty = new V2FolderSccBigQueryExportArgs(); + + /** + * This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + */ + @Import(name="bigQueryExportId", required=true) + private Output bigQueryExportId; + + /** + * @return This must be unique within the organization. 
It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + */ + public Output bigQueryExportId() { + return this.bigQueryExportId; + } + + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + @Import(name="dataset") + private @Nullable Output dataset; + + /** + * @return The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + public Optional> dataset() { + return Optional.ofNullable(this.dataset); + } + + /** + * The description of the notification config (max of 1024 characters). + * + */ + @Import(name="description") + private @Nullable Output description; + + /** + * @return The description of the notification config (max of 1024 characters). + * + */ + public Optional> description() { + return Optional.ofNullable(this.description); + } + + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + @Import(name="filter") + private @Nullable Output filter; + + /** + * @return Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + public Optional> filter() { + return Optional.ofNullable(this.filter); + } + + /** + * The folder where Cloud Security Command Center Big Query Export + * Config lives in. 
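As a brief aside on the `filter` grammar documented for these args: the strings below are an illustrative sketch only, not part of the generated file, and either one could be passed to the builder's `filter(...)` setter. `state` and `mute` appear in the organization-level example elsewhere in this patch; `severity` and `category` are assumed finding fields used merely to show the `:` substring operator.

```java
// Equality plus negation, as in the example elsewhere in this patch.
String activeAndUnmuted = "state=\"ACTIVE\" AND NOT mute=\"MUTED\"";
// Assumed field names, demonstrating ':' substring matching alongside '='.
String highSeverityPersistence = "severity=\"HIGH\" AND category:\"PERSISTENCE\"";
```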
+ * + */ + @Import(name="folder", required=true) + private Output folder; + + /** + * @return The folder where Cloud Security Command Center Big Query Export + * Config lives in. + * + */ + public Output folder() { + return this.folder; + } + + /** + * The BigQuery export configuration is stored in this location. If not provided, Use global as default. + * + */ + @Import(name="location") + private @Nullable Output location; + + /** + * @return The BigQuery export configuration is stored in this location. If not provided, Use global as default. + * + */ + public Optional> location() { + return Optional.ofNullable(this.location); + } + + private V2FolderSccBigQueryExportArgs() {} + + private V2FolderSccBigQueryExportArgs(V2FolderSccBigQueryExportArgs $) { + this.bigQueryExportId = $.bigQueryExportId; + this.dataset = $.dataset; + this.description = $.description; + this.filter = $.filter; + this.folder = $.folder; + this.location = $.location; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(V2FolderSccBigQueryExportArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private V2FolderSccBigQueryExportArgs $; + + public Builder() { + $ = new V2FolderSccBigQueryExportArgs(); + } + + public Builder(V2FolderSccBigQueryExportArgs defaults) { + $ = new V2FolderSccBigQueryExportArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param bigQueryExportId This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + * @return builder + * + */ + public Builder bigQueryExportId(Output bigQueryExportId) { + $.bigQueryExportId = bigQueryExportId; + return this; + } + + /** + * @param bigQueryExportId This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + * @return builder + * + */ + public Builder bigQueryExportId(String bigQueryExportId) { + return bigQueryExportId(Output.of(bigQueryExportId)); + } + + /** + * @param dataset The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + * @return builder + * + */ + public Builder dataset(@Nullable Output dataset) { + $.dataset = dataset; + return this; + } + + /** + * @param dataset The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + * @return builder + * + */ + public Builder dataset(String dataset) { + return dataset(Output.of(dataset)); + } + + /** + * @param description The description of the notification config (max of 1024 characters). + * + * @return builder + * + */ + public Builder description(@Nullable Output description) { + $.description = description; + return this; + } + + /** + * @param description The description of the notification config (max of 1024 characters). 
+ * + * @return builder + * + */ + public Builder description(String description) { + return description(Output.of(description)); + } + + /** + * @param filter Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + * @return builder + * + */ + public Builder filter(@Nullable Output filter) { + $.filter = filter; + return this; + } + + /** + * @param filter Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + * @return builder + * + */ + public Builder filter(String filter) { + return filter(Output.of(filter)); + } + + /** + * @param folder The folder where Cloud Security Command Center Big Query Export + * Config lives in. + * + * @return builder + * + */ + public Builder folder(Output folder) { + $.folder = folder; + return this; + } + + /** + * @param folder The folder where Cloud Security Command Center Big Query Export + * Config lives in. + * + * @return builder + * + */ + public Builder folder(String folder) { + return folder(Output.of(folder)); + } + + /** + * @param location The BigQuery export configuration is stored in this location. If not provided, Use global as default. + * + * @return builder + * + */ + public Builder location(@Nullable Output location) { + $.location = location; + return this; + } + + /** + * @param location The BigQuery export configuration is stored in this location. If not provided, Use global as default. 
+ * + * @return builder + * + */ + public Builder location(String location) { + return location(Output.of(location)); + } + + public V2FolderSccBigQueryExportArgs build() { + if ($.bigQueryExportId == null) { + throw new MissingRequiredPropertyException("V2FolderSccBigQueryExportArgs", "bigQueryExportId"); + } + if ($.folder == null) { + throw new MissingRequiredPropertyException("V2FolderSccBigQueryExportArgs", "folder"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2OrganizationSccBigQueryExports.java b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2OrganizationSccBigQueryExports.java index 498aa79a8e..64039de667 100644 --- a/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2OrganizationSccBigQueryExports.java +++ b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2OrganizationSccBigQueryExports.java @@ -58,7 +58,7 @@ * * public static void stack(Context ctx) { * var default_ = new Dataset("default", DatasetArgs.builder() - * .datasetId("my_dataset_id") + * .datasetId("") * .friendlyName("test") * .description("This is a test description") * .location("US") @@ -71,7 +71,7 @@ * .name("my-export") * .bigQueryExportId("my-export") * .organization("123456789") - * .dataset("my-dataset") + * .dataset(default_.id()) * .location("global") * .description("Cloud Security Command Center Findings Big Query Export Config") * .filter("state=\"ACTIVE\" AND NOT mute=\"MUTED\"") diff --git a/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2ProjectSccBigQueryExport.java b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2ProjectSccBigQueryExport.java new file mode 100644 index 0000000000..c41c6783ad --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2ProjectSccBigQueryExport.java @@ -0,0 +1,335 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.securitycenter; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Export; +import com.pulumi.core.annotations.ResourceType; +import com.pulumi.core.internal.Codegen; +import com.pulumi.gcp.Utilities; +import com.pulumi.gcp.securitycenter.V2ProjectSccBigQueryExportArgs; +import com.pulumi.gcp.securitycenter.inputs.V2ProjectSccBigQueryExportState; +import java.lang.String; +import java.util.Optional; +import javax.annotation.Nullable; + +/** + * A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + * It represents exporting Security Command Center data, including assets, findings, and security marks + * using gcloud scc bqexports + * > **Note:** In order to use Cloud SCC resources, your organization must be enrolled + * in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + * Without doing so, you may run into errors during resource creation. 
+ * + * To get more information about ProjectSccBigQueryExport, see: + * + * * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.bigQueryExports) + * * How-to Guides + * * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) + * + * ## Example Usage + * + * ### Scc V2 Project Big Query Export Config Basic + * + * <!--Start PulumiCodeChooser --> + * <!--End PulumiCodeChooser --> + * + * ## Import + * + * ProjectSccBigQueryExport can be imported using any of these accepted formats: + * + * * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` + * + * * `{{project}}/{{location}}/{{big_query_export_id}}` + * + * * `{{location}}/{{big_query_export_id}}` + * + * When using the `pulumi import` command, ProjectSccBigQueryExport can be imported using one of the formats above. For example: + * + * ```sh + * $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{project}}/{{location}}/{{big_query_export_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{location}}/{{big_query_export_id}} + * ``` + * + */ +@ResourceType(type="gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport") +public class V2ProjectSccBigQueryExport extends com.pulumi.resources.CustomResource { + /** + * This must be unique within the organization. + * + * *** + * + */ + @Export(name="bigQueryExportId", refs={String.class}, tree="[0]") + private Output bigQueryExportId; + + /** + * @return This must be unique within the organization. + * + * *** + * + */ + public Output bigQueryExportId() { + return this.bigQueryExportId; + } + /** + * The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + @Export(name="createTime", refs={String.class}, tree="[0]") + private Output createTime; + + /** + * @return The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + public Output createTime() { + return this.createTime; + } + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + @Export(name="dataset", refs={String.class}, tree="[0]") + private Output dataset; + + /** + * @return The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). 
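As an aside for readers of this patch: the `## Example Usage` heading in the class Javadoc above is followed by an empty PulumiCodeChooser block. The following is only a hedged sketch of what a project-level export might look like when written against the `V2ProjectSccBigQueryExportArgs` builder that appears later in this patch; the project ID and dataset path are placeholders, and the filter string is borrowed from the organization-level example earlier in the diff.

```java
package myproject;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.gcp.securitycenter.V2ProjectSccBigQueryExport;
import com.pulumi.gcp.securitycenter.V2ProjectSccBigQueryExportArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Placeholder project and dataset; a real program would typically reference a
        // BigQuery dataset resource, as the organization-level example in this patch does.
        var export = new V2ProjectSccBigQueryExport("default", V2ProjectSccBigQueryExportArgs.builder()
            .bigQueryExportId("my-export")
            .project("my-project-id")
            .location("global")
            .dataset("projects/my-project-id/datasets/my_dataset_id")
            .description("Cloud Security Command Center Findings Big Query Export Config")
            .filter("state=\"ACTIVE\" AND NOT mute=\"MUTED\"")
            .build());
    }
}
```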
+ * + */ + public Output> dataset() { + return Codegen.optional(this.dataset); + } + /** + * The description of the notification config (max of 1024 characters). + * + */ + @Export(name="description", refs={String.class}, tree="[0]") + private Output description; + + /** + * @return The description of the notification config (max of 1024 characters). + * + */ + public Output> description() { + return Codegen.optional(this.description); + } + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + @Export(name="filter", refs={String.class}, tree="[0]") + private Output filter; + + /** + * @return Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + public Output> filter() { + return Codegen.optional(this.filter); + } + /** + * location Id is provided by organization. If not provided, Use global as default. + * + */ + @Export(name="location", refs={String.class}, tree="[0]") + private Output location; + + /** + * @return location Id is provided by organization. If not provided, Use global as default. + * + */ + public Output> location() { + return Codegen.optional(this.location); + } + /** + * Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + * + */ + @Export(name="mostRecentEditor", refs={String.class}, tree="[0]") + private Output mostRecentEditor; + + /** + * @return Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. 
+ * + */ + public Output mostRecentEditor() { + return this.mostRecentEditor; + } + /** + * The resource name of this export, in the format + * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + */ + @Export(name="name", refs={String.class}, tree="[0]") + private Output name; + + /** + * @return The resource name of this export, in the format + * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + */ + public Output name() { + return this.name; + } + /** + * The service account that needs permission to create table and upload data to the BigQuery dataset. + * + */ + @Export(name="principal", refs={String.class}, tree="[0]") + private Output principal; + + /** + * @return The service account that needs permission to create table and upload data to the BigQuery dataset. + * + */ + public Output principal() { + return this.principal; + } + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + @Export(name="project", refs={String.class}, tree="[0]") + private Output project; + + /** + * @return The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + public Output project() { + return this.project; + } + /** + * The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + @Export(name="updateTime", refs={String.class}, tree="[0]") + private Output updateTime; + + /** + * @return The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + public Output updateTime() { + return this.updateTime; + } + + /** + * + * @param name The _unique_ name of the resulting resource. + */ + public V2ProjectSccBigQueryExport(java.lang.String name) { + this(name, V2ProjectSccBigQueryExportArgs.Empty); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + */ + public V2ProjectSccBigQueryExport(java.lang.String name, V2ProjectSccBigQueryExportArgs args) { + this(name, args, null); + } + /** + * + * @param name The _unique_ name of the resulting resource. + * @param args The arguments to use to populate this resource's properties. + * @param options A bag of options that control this resource's behavior. 
+ */ + public V2ProjectSccBigQueryExport(java.lang.String name, V2ProjectSccBigQueryExportArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport", name, makeArgs(args, options), makeResourceOptions(options, Codegen.empty()), false); + } + + private V2ProjectSccBigQueryExport(java.lang.String name, Output id, @Nullable V2ProjectSccBigQueryExportState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + super("gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport", name, state, makeResourceOptions(options, id), false); + } + + private static V2ProjectSccBigQueryExportArgs makeArgs(V2ProjectSccBigQueryExportArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) { + if (options != null && options.getUrn().isPresent()) { + return null; + } + return args == null ? V2ProjectSccBigQueryExportArgs.Empty : args; + } + + private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output id) { + var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder() + .version(Utilities.getVersion()) + .build(); + return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id); + } + + /** + * Get an existing Host resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state + * @param options Optional settings to control the behavior of the CustomResource. + */ + public static V2ProjectSccBigQueryExport get(java.lang.String name, Output id, @Nullable V2ProjectSccBigQueryExportState state, @Nullable com.pulumi.resources.CustomResourceOptions options) { + return new V2ProjectSccBigQueryExport(name, id, state, options); + } +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2ProjectSccBigQueryExportArgs.java b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2ProjectSccBigQueryExportArgs.java new file mode 100644 index 0000000000..f63d917843 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/V2ProjectSccBigQueryExportArgs.java @@ -0,0 +1,364 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.securitycenter; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import com.pulumi.exceptions.MissingRequiredPropertyException; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class V2ProjectSccBigQueryExportArgs extends com.pulumi.resources.ResourceArgs { + + public static final V2ProjectSccBigQueryExportArgs Empty = new V2ProjectSccBigQueryExportArgs(); + + /** + * This must be unique within the organization. + * + * *** + * + */ + @Import(name="bigQueryExportId", required=true) + private Output bigQueryExportId; + + /** + * @return This must be unique within the organization. + * + * *** + * + */ + public Output bigQueryExportId() { + return this.bigQueryExportId; + } + + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". 
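A quick, assumption-laden illustration of the static `get` lookup defined above (a fragment to run inside a Pulumi program, not a complete file): the ID follows the documented `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` import format with placeholder values, and the state and options arguments are simply left null.

```java
import com.pulumi.core.Output;
import com.pulumi.gcp.securitycenter.V2ProjectSccBigQueryExport;

// Look up an existing export by its full resource ID (placeholders throughout).
var existing = V2ProjectSccBigQueryExport.get(
    "existing-export",
    Output.of("projects/my-project-id/locations/global/bigQueryExports/my-export"),
    null,
    null);
```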
+ * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + @Import(name="dataset") + private @Nullable Output dataset; + + /** + * @return The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + public Optional> dataset() { + return Optional.ofNullable(this.dataset); + } + + /** + * The description of the notification config (max of 1024 characters). + * + */ + @Import(name="description") + private @Nullable Output description; + + /** + * @return The description of the notification config (max of 1024 characters). + * + */ + public Optional> description() { + return Optional.ofNullable(this.description); + } + + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + @Import(name="filter") + private @Nullable Output filter; + + /** + * @return Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + public Optional> filter() { + return Optional.ofNullable(this.filter); + } + + /** + * location Id is provided by organization. If not provided, Use global as default. + * + */ + @Import(name="location") + private @Nullable Output location; + + /** + * @return location Id is provided by organization. If not provided, Use global as default. + * + */ + public Optional> location() { + return Optional.ofNullable(this.location); + } + + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + @Import(name="project") + private @Nullable Output project; + + /** + * @return The ID of the project in which the resource belongs. 
+ * If it is not provided, the provider project is used. + * + */ + public Optional> project() { + return Optional.ofNullable(this.project); + } + + private V2ProjectSccBigQueryExportArgs() {} + + private V2ProjectSccBigQueryExportArgs(V2ProjectSccBigQueryExportArgs $) { + this.bigQueryExportId = $.bigQueryExportId; + this.dataset = $.dataset; + this.description = $.description; + this.filter = $.filter; + this.location = $.location; + this.project = $.project; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(V2ProjectSccBigQueryExportArgs defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private V2ProjectSccBigQueryExportArgs $; + + public Builder() { + $ = new V2ProjectSccBigQueryExportArgs(); + } + + public Builder(V2ProjectSccBigQueryExportArgs defaults) { + $ = new V2ProjectSccBigQueryExportArgs(Objects.requireNonNull(defaults)); + } + + /** + * @param bigQueryExportId This must be unique within the organization. + * + * *** + * + * @return builder + * + */ + public Builder bigQueryExportId(Output bigQueryExportId) { + $.bigQueryExportId = bigQueryExportId; + return this; + } + + /** + * @param bigQueryExportId This must be unique within the organization. + * + * *** + * + * @return builder + * + */ + public Builder bigQueryExportId(String bigQueryExportId) { + return bigQueryExportId(Output.of(bigQueryExportId)); + } + + /** + * @param dataset The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + * @return builder + * + */ + public Builder dataset(@Nullable Output dataset) { + $.dataset = dataset; + return this; + } + + /** + * @param dataset The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + * @return builder + * + */ + public Builder dataset(String dataset) { + return dataset(Output.of(dataset)); + } + + /** + * @param description The description of the notification config (max of 1024 characters). + * + * @return builder + * + */ + public Builder description(@Nullable Output description) { + $.description = description; + return this; + } + + /** + * @param description The description of the notification config (max of 1024 characters). + * + * @return builder + * + */ + public Builder description(String description) { + return description(Output.of(description)); + } + + /** + * @param filter Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. 
+ * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + * @return builder + * + */ + public Builder filter(@Nullable Output filter) { + $.filter = filter; + return this; + } + + /** + * @param filter Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + * @return builder + * + */ + public Builder filter(String filter) { + return filter(Output.of(filter)); + } + + /** + * @param location location Id is provided by organization. If not provided, Use global as default. + * + * @return builder + * + */ + public Builder location(@Nullable Output location) { + $.location = location; + return this; + } + + /** + * @param location location Id is provided by organization. If not provided, Use global as default. + * + * @return builder + * + */ + public Builder location(String location) { + return location(Output.of(location)); + } + + /** + * @param project The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + * @return builder + * + */ + public Builder project(@Nullable Output project) { + $.project = project; + return this; + } + + /** + * @param project The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + * @return builder + * + */ + public Builder project(String project) { + return project(Output.of(project)); + } + + public V2ProjectSccBigQueryExportArgs build() { + if ($.bigQueryExportId == null) { + throw new MissingRequiredPropertyException("V2ProjectSccBigQueryExportArgs", "bigQueryExportId"); + } + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/inputs/V2FolderSccBigQueryExportState.java b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/inputs/V2FolderSccBigQueryExportState.java new file mode 100644 index 0000000000..b2fe43f5ce --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/inputs/V2FolderSccBigQueryExportState.java @@ -0,0 +1,581 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +package com.pulumi.gcp.securitycenter.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class V2FolderSccBigQueryExportState extends com.pulumi.resources.ResourceArgs { + + public static final V2FolderSccBigQueryExportState Empty = new V2FolderSccBigQueryExportState(); + + /** + * This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + */ + @Import(name="bigQueryExportId") + private @Nullable Output bigQueryExportId; + + /** + * @return This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + */ + public Optional> bigQueryExportId() { + return Optional.ofNullable(this.bigQueryExportId); + } + + /** + * The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + @Import(name="createTime") + private @Nullable Output createTime; + + /** + * @return The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + public Optional> createTime() { + return Optional.ofNullable(this.createTime); + } + + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + @Import(name="dataset") + private @Nullable Output dataset; + + /** + * @return The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + public Optional> dataset() { + return Optional.ofNullable(this.dataset); + } + + /** + * The description of the notification config (max of 1024 characters). + * + */ + @Import(name="description") + private @Nullable Output description; + + /** + * @return The description of the notification config (max of 1024 characters). + * + */ + public Optional> description() { + return Optional.ofNullable(this.description); + } + + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. 
+ * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + @Import(name="filter") + private @Nullable Output filter; + + /** + * @return Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + public Optional> filter() { + return Optional.ofNullable(this.filter); + } + + /** + * The folder where Cloud Security Command Center Big Query Export + * Config lives in. + * + */ + @Import(name="folder") + private @Nullable Output folder; + + /** + * @return The folder where Cloud Security Command Center Big Query Export + * Config lives in. + * + */ + public Optional> folder() { + return Optional.ofNullable(this.folder); + } + + /** + * The BigQuery export configuration is stored in this location. If not provided, Use global as default. + * + */ + @Import(name="location") + private @Nullable Output location; + + /** + * @return The BigQuery export configuration is stored in this location. If not provided, Use global as default. + * + */ + public Optional> location() { + return Optional.ofNullable(this.location); + } + + /** + * Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + * + */ + @Import(name="mostRecentEditor") + private @Nullable Output mostRecentEditor; + + /** + * @return Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + * + */ + public Optional> mostRecentEditor() { + return Optional.ofNullable(this.mostRecentEditor); + } + + /** + * The resource name of this export, in the format + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return The resource name of this export, in the format + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + /** + * The service account that needs permission to create table and upload data to the BigQuery dataset. 
+ * + */ + @Import(name="principal") + private @Nullable Output principal; + + /** + * @return The service account that needs permission to create table and upload data to the BigQuery dataset. + * + */ + public Optional> principal() { + return Optional.ofNullable(this.principal); + } + + /** + * The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + @Import(name="updateTime") + private @Nullable Output updateTime; + + /** + * @return The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + public Optional> updateTime() { + return Optional.ofNullable(this.updateTime); + } + + private V2FolderSccBigQueryExportState() {} + + private V2FolderSccBigQueryExportState(V2FolderSccBigQueryExportState $) { + this.bigQueryExportId = $.bigQueryExportId; + this.createTime = $.createTime; + this.dataset = $.dataset; + this.description = $.description; + this.filter = $.filter; + this.folder = $.folder; + this.location = $.location; + this.mostRecentEditor = $.mostRecentEditor; + this.name = $.name; + this.principal = $.principal; + this.updateTime = $.updateTime; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(V2FolderSccBigQueryExportState defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private V2FolderSccBigQueryExportState $; + + public Builder() { + $ = new V2FolderSccBigQueryExportState(); + } + + public Builder(V2FolderSccBigQueryExportState defaults) { + $ = new V2FolderSccBigQueryExportState(Objects.requireNonNull(defaults)); + } + + /** + * @param bigQueryExportId This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + * @return builder + * + */ + public Builder bigQueryExportId(@Nullable Output bigQueryExportId) { + $.bigQueryExportId = bigQueryExportId; + return this; + } + + /** + * @param bigQueryExportId This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * *** + * + * @return builder + * + */ + public Builder bigQueryExportId(String bigQueryExportId) { + return bigQueryExportId(Output.of(bigQueryExportId)); + } + + /** + * @param createTime The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
+ * + * @return builder + * + */ + public Builder createTime(@Nullable Output createTime) { + $.createTime = createTime; + return this; + } + + /** + * @param createTime The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + * @return builder + * + */ + public Builder createTime(String createTime) { + return createTime(Output.of(createTime)); + } + + /** + * @param dataset The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + * @return builder + * + */ + public Builder dataset(@Nullable Output dataset) { + $.dataset = dataset; + return this; + } + + /** + * @param dataset The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + * @return builder + * + */ + public Builder dataset(String dataset) { + return dataset(Output.of(dataset)); + } + + /** + * @param description The description of the notification config (max of 1024 characters). + * + * @return builder + * + */ + public Builder description(@Nullable Output description) { + $.description = description; + return this; + } + + /** + * @param description The description of the notification config (max of 1024 characters). + * + * @return builder + * + */ + public Builder description(String description) { + return description(Output.of(description)); + } + + /** + * @param filter Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + * @return builder + * + */ + public Builder filter(@Nullable Output filter) { + $.filter = filter; + return this; + } + + /** + * @param filter Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. 
+ * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + * @return builder + * + */ + public Builder filter(String filter) { + return filter(Output.of(filter)); + } + + /** + * @param folder The folder where Cloud Security Command Center Big Query Export + * Config lives in. + * + * @return builder + * + */ + public Builder folder(@Nullable Output folder) { + $.folder = folder; + return this; + } + + /** + * @param folder The folder where Cloud Security Command Center Big Query Export + * Config lives in. + * + * @return builder + * + */ + public Builder folder(String folder) { + return folder(Output.of(folder)); + } + + /** + * @param location The BigQuery export configuration is stored in this location. If not provided, Use global as default. + * + * @return builder + * + */ + public Builder location(@Nullable Output location) { + $.location = location; + return this; + } + + /** + * @param location The BigQuery export configuration is stored in this location. If not provided, Use global as default. + * + * @return builder + * + */ + public Builder location(String location) { + return location(Output.of(location)); + } + + /** + * @param mostRecentEditor Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + * + * @return builder + * + */ + public Builder mostRecentEditor(@Nullable Output mostRecentEditor) { + $.mostRecentEditor = mostRecentEditor; + return this; + } + + /** + * @param mostRecentEditor Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + * + * @return builder + * + */ + public Builder mostRecentEditor(String mostRecentEditor) { + return mostRecentEditor(Output.of(mostRecentEditor)); + } + + /** + * @param name The resource name of this export, in the format + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name The resource name of this export, in the format + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + /** + * @param principal The service account that needs permission to create table and upload data to the BigQuery dataset. + * + * @return builder + * + */ + public Builder principal(@Nullable Output principal) { + $.principal = principal; + return this; + } + + /** + * @param principal The service account that needs permission to create table and upload data to the BigQuery dataset. + * + * @return builder + * + */ + public Builder principal(String principal) { + return principal(Output.of(principal)); + } + + /** + * @param updateTime The most recent time at which the BigQuery export was updated. 
This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + * @return builder + * + */ + public Builder updateTime(@Nullable Output updateTime) { + $.updateTime = updateTime; + return this; + } + + /** + * @param updateTime The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + * @return builder + * + */ + public Builder updateTime(String updateTime) { + return updateTime(Output.of(updateTime)); + } + + public V2FolderSccBigQueryExportState build() { + return $; + } + } + +} diff --git a/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/inputs/V2ProjectSccBigQueryExportState.java b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/inputs/V2ProjectSccBigQueryExportState.java new file mode 100644 index 0000000000..4b8c212e41 --- /dev/null +++ b/sdk/java/src/main/java/com/pulumi/gcp/securitycenter/inputs/V2ProjectSccBigQueryExportState.java @@ -0,0 +1,573 @@ +// *** WARNING: this file was generated by pulumi-java-gen. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +package com.pulumi.gcp.securitycenter.inputs; + +import com.pulumi.core.Output; +import com.pulumi.core.annotations.Import; +import java.lang.String; +import java.util.Objects; +import java.util.Optional; +import javax.annotation.Nullable; + + +public final class V2ProjectSccBigQueryExportState extends com.pulumi.resources.ResourceArgs { + + public static final V2ProjectSccBigQueryExportState Empty = new V2ProjectSccBigQueryExportState(); + + /** + * This must be unique within the organization. + * + * *** + * + */ + @Import(name="bigQueryExportId") + private @Nullable Output bigQueryExportId; + + /** + * @return This must be unique within the organization. + * + * *** + * + */ + public Optional> bigQueryExportId() { + return Optional.ofNullable(this.bigQueryExportId); + } + + /** + * The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + @Import(name="createTime") + private @Nullable Output createTime; + + /** + * @return The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + public Optional> createTime() { + return Optional.ofNullable(this.createTime); + } + + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + @Import(name="dataset") + private @Nullable Output dataset; + + /** + * @return The dataset to write findings' updates to. 
+ * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + */ + public Optional> dataset() { + return Optional.ofNullable(this.dataset); + } + + /** + * The description of the notification config (max of 1024 characters). + * + */ + @Import(name="description") + private @Nullable Output description; + + /** + * @return The description of the notification config (max of 1024 characters). + * + */ + public Optional> description() { + return Optional.ofNullable(this.description); + } + + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + @Import(name="filter") + private @Nullable Output filter; + + /** + * @return Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + */ + public Optional> filter() { + return Optional.ofNullable(this.filter); + } + + /** + * location Id is provided by organization. If not provided, Use global as default. + * + */ + @Import(name="location") + private @Nullable Output location; + + /** + * @return location Id is provided by organization. If not provided, Use global as default. + * + */ + public Optional> location() { + return Optional.ofNullable(this.location); + } + + /** + * Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + * + */ + @Import(name="mostRecentEditor") + private @Nullable Output mostRecentEditor; + + /** + * @return Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. 
+ * + */ + public Optional> mostRecentEditor() { + return Optional.ofNullable(this.mostRecentEditor); + } + + /** + * The resource name of this export, in the format + * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + */ + @Import(name="name") + private @Nullable Output name; + + /** + * @return The resource name of this export, in the format + * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + */ + public Optional> name() { + return Optional.ofNullable(this.name); + } + + /** + * The service account that needs permission to create table and upload data to the BigQuery dataset. + * + */ + @Import(name="principal") + private @Nullable Output principal; + + /** + * @return The service account that needs permission to create table and upload data to the BigQuery dataset. + * + */ + public Optional> principal() { + return Optional.ofNullable(this.principal); + } + + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + @Import(name="project") + private @Nullable Output project; + + /** + * @return The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + */ + public Optional> project() { + return Optional.ofNullable(this.project); + } + + /** + * The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + @Import(name="updateTime") + private @Nullable Output updateTime; + + /** + * @return The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + */ + public Optional> updateTime() { + return Optional.ofNullable(this.updateTime); + } + + private V2ProjectSccBigQueryExportState() {} + + private V2ProjectSccBigQueryExportState(V2ProjectSccBigQueryExportState $) { + this.bigQueryExportId = $.bigQueryExportId; + this.createTime = $.createTime; + this.dataset = $.dataset; + this.description = $.description; + this.filter = $.filter; + this.location = $.location; + this.mostRecentEditor = $.mostRecentEditor; + this.name = $.name; + this.principal = $.principal; + this.project = $.project; + this.updateTime = $.updateTime; + } + + public static Builder builder() { + return new Builder(); + } + public static Builder builder(V2ProjectSccBigQueryExportState defaults) { + return new Builder(defaults); + } + + public static final class Builder { + private V2ProjectSccBigQueryExportState $; + + public Builder() { + $ = new V2ProjectSccBigQueryExportState(); + } + + public Builder(V2ProjectSccBigQueryExportState defaults) { + $ = new V2ProjectSccBigQueryExportState(Objects.requireNonNull(defaults)); + } + + /** + * @param bigQueryExportId This must be unique within the organization. 
+ * + * *** + * + * @return builder + * + */ + public Builder bigQueryExportId(@Nullable Output bigQueryExportId) { + $.bigQueryExportId = bigQueryExportId; + return this; + } + + /** + * @param bigQueryExportId This must be unique within the organization. + * + * *** + * + * @return builder + * + */ + public Builder bigQueryExportId(String bigQueryExportId) { + return bigQueryExportId(Output.of(bigQueryExportId)); + } + + /** + * @param createTime The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + * @return builder + * + */ + public Builder createTime(@Nullable Output createTime) { + $.createTime = createTime; + return this; + } + + /** + * @param createTime The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + * @return builder + * + */ + public Builder createTime(String createTime) { + return createTime(Output.of(createTime)); + } + + /** + * @param dataset The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + * @return builder + * + */ + public Builder dataset(@Nullable Output dataset) { + $.dataset = dataset; + return this; + } + + /** + * @param dataset The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + * + * @return builder + * + */ + public Builder dataset(String dataset) { + return dataset(Output.of(dataset)); + } + + /** + * @param description The description of the notification config (max of 1024 characters). + * + * @return builder + * + */ + public Builder description(@Nullable Output description) { + $.description = description; + return this; + } + + /** + * @param description The description of the notification config (max of 1024 characters). + * + * @return builder + * + */ + public Builder description(String description) { + return description(Output.of(description)); + } + + /** + * @param filter Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. 
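+ * For example, an illustrative filter (the field names here are assumptions for illustration, not taken from these docs) might be: severity = "HIGH" AND category : "PERSISTENCE".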
+ * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + * @return builder + * + */ + public Builder filter(@Nullable Output filter) { + $.filter = filter; + return this; + } + + /** + * @param filter Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form <field> <operator> <value> and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * > , <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + * + * @return builder + * + */ + public Builder filter(String filter) { + return filter(Output.of(filter)); + } + + /** + * @param location location Id is provided by organization. If not provided, Use global as default. + * + * @return builder + * + */ + public Builder location(@Nullable Output location) { + $.location = location; + return this; + } + + /** + * @param location location Id is provided by organization. If not provided, Use global as default. + * + * @return builder + * + */ + public Builder location(String location) { + return location(Output.of(location)); + } + + /** + * @param mostRecentEditor Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + * + * @return builder + * + */ + public Builder mostRecentEditor(@Nullable Output mostRecentEditor) { + $.mostRecentEditor = mostRecentEditor; + return this; + } + + /** + * @param mostRecentEditor Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + * + * @return builder + * + */ + public Builder mostRecentEditor(String mostRecentEditor) { + return mostRecentEditor(Output.of(mostRecentEditor)); + } + + /** + * @param name The resource name of this export, in the format + * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + * @return builder + * + */ + public Builder name(@Nullable Output name) { + $.name = name; + return this; + } + + /** + * @param name The resource name of this export, in the format + * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + * + * @return builder + * + */ + public Builder name(String name) { + return name(Output.of(name)); + } + + /** + * @param principal The service account that needs permission to create table and upload data to the BigQuery dataset. 
+ * + * @return builder + * + */ + public Builder principal(@Nullable Output principal) { + $.principal = principal; + return this; + } + + /** + * @param principal The service account that needs permission to create table and upload data to the BigQuery dataset. + * + * @return builder + * + */ + public Builder principal(String principal) { + return principal(Output.of(principal)); + } + + /** + * @param project The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + * @return builder + * + */ + public Builder project(@Nullable Output project) { + $.project = project; + return this; + } + + /** + * @param project The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + * + * @return builder + * + */ + public Builder project(String project) { + return project(Output.of(project)); + } + + /** + * @param updateTime The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + * @return builder + * + */ + public Builder updateTime(@Nullable Output updateTime) { + $.updateTime = updateTime; + return this; + } + + /** + * @param updateTime The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + * + * @return builder + * + */ + public Builder updateTime(String updateTime) { + return updateTime(Output.of(updateTime)); + } + + public V2ProjectSccBigQueryExportState build() { + return $; + } + } + +} diff --git a/sdk/nodejs/alloydb/cluster.ts b/sdk/nodejs/alloydb/cluster.ts index 0b568d0f9c..91f6b2474e 100644 --- a/sdk/nodejs/alloydb/cluster.ts +++ b/sdk/nodejs/alloydb/cluster.ts @@ -233,6 +233,7 @@ export class Cluster extends pulumi.CustomResource { * Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletionPolicy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE */ public readonly deletionPolicy!: pulumi.Output; /** @@ -336,6 +337,16 @@ export class Cluster extends pulumi.CustomResource { * Output only. The current serving state of the cluster. */ public /*out*/ readonly state!: pulumi.Output; + /** + * The subscription type of the cluster. + * Possible values are: `TRIAL`, `STANDARD`. + */ + public readonly subscriptionType!: pulumi.Output; + /** + * Contains information and all metadata related to TRIAL clusters. + * Structure is documented below. + */ + public /*out*/ readonly trialMetadatas!: pulumi.Output; /** * The system-generated UID of the resource. */ @@ -384,6 +395,8 @@ export class Cluster extends pulumi.CustomResource { resourceInputs["restoreContinuousBackupSource"] = state ?
state.restoreContinuousBackupSource : undefined; resourceInputs["secondaryConfig"] = state ? state.secondaryConfig : undefined; resourceInputs["state"] = state ? state.state : undefined; + resourceInputs["subscriptionType"] = state ? state.subscriptionType : undefined; + resourceInputs["trialMetadatas"] = state ? state.trialMetadatas : undefined; resourceInputs["uid"] = state ? state.uid : undefined; } else { const args = argsOrState as ClusterArgs | undefined; @@ -413,6 +426,7 @@ export class Cluster extends pulumi.CustomResource { resourceInputs["restoreBackupSource"] = args ? args.restoreBackupSource : undefined; resourceInputs["restoreContinuousBackupSource"] = args ? args.restoreContinuousBackupSource : undefined; resourceInputs["secondaryConfig"] = args ? args.secondaryConfig : undefined; + resourceInputs["subscriptionType"] = args ? args.subscriptionType : undefined; resourceInputs["backupSources"] = undefined /*out*/; resourceInputs["continuousBackupInfos"] = undefined /*out*/; resourceInputs["effectiveAnnotations"] = undefined /*out*/; @@ -423,6 +437,7 @@ export class Cluster extends pulumi.CustomResource { resourceInputs["pulumiLabels"] = undefined /*out*/; resourceInputs["reconciling"] = undefined /*out*/; resourceInputs["state"] = undefined /*out*/; + resourceInputs["trialMetadatas"] = undefined /*out*/; resourceInputs["uid"] = undefined /*out*/; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); @@ -483,6 +498,7 @@ export interface ClusterState { * Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletionPolicy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE */ deletionPolicy?: pulumi.Input; /** @@ -586,6 +602,16 @@ export interface ClusterState { * Output only. The current serving state of the cluster. */ state?: pulumi.Input; + /** + * The subscription type of the cluster. + * Possible values are: `TRIAL`, `STANDARD`. + */ + subscriptionType?: pulumi.Input; + /** + * Contains information and all metadata related to TRIAL clusters. + * Structure is documented below. + */ + trialMetadatas?: pulumi.Input[]>; /** * The system-generated UID of the resource. */ @@ -633,6 +659,7 @@ export interface ClusterArgs { * Policy to determine if the cluster should be deleted forcefully. * Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. * Deleting a Secondary cluster with a secondary instance REQUIRES setting deletionPolicy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + * Possible values: DEFAULT, FORCE */ deletionPolicy?: pulumi.Input; /** @@ -701,4 +728,9 @@ export interface ClusterArgs { * Structure is documented below. */ secondaryConfig?: pulumi.Input; + /** + * The subscription type of the cluster. + * Possible values are: `TRIAL`, `STANDARD`.
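+ * As an illustrative sketch (not part of the generated docs), a trial cluster would be created by setting `subscriptionType: "TRIAL"` in the `gcp.alloydb.Cluster` resource arguments.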
+ */ + subscriptionType?: pulumi.Input; } diff --git a/sdk/nodejs/assuredworkloads/workload.ts b/sdk/nodejs/assuredworkloads/workload.ts index ce69f94210..267c130d0a 100644 --- a/sdk/nodejs/assuredworkloads/workload.ts +++ b/sdk/nodejs/assuredworkloads/workload.ts @@ -30,7 +30,7 @@ import * as utilities from "../utilities"; * provisionedResourcesParent: "folders/519620126891", * resourceSettings: [ * { - * displayName: "folder-display-name", + * displayName: "{{name}}", * resourceType: "CONSUMER_FOLDER", * }, * { @@ -81,6 +81,43 @@ import * as utilities from "../utilities"; * }, * }); * ``` + * ### Split_billing_partner_workload + * A Split billing partner test of the assuredworkloads api + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const primary = new gcp.assuredworkloads.Workload("primary", { + * complianceRegime: "ASSURED_WORKLOADS_FOR_PARTNERS", + * displayName: "display", + * location: "europe-west8", + * organization: "123456789", + * billingAccount: "billingAccounts/000000-0000000-0000000-000000", + * partner: "SOVEREIGN_CONTROLS_BY_PSN", + * partnerPermissions: { + * assuredWorkloadsMonitoring: true, + * dataLogsViewer: true, + * serviceAccessApprover: true, + * }, + * partnerServicesBillingAccount: "billingAccounts/01BF3F-2C6DE5-30C607", + * resourceSettings: [ + * { + * resourceType: "CONSUMER_FOLDER", + * }, + * { + * resourceType: "ENCRYPTION_KEYS_PROJECT", + * }, + * { + * resourceId: "ring", + * resourceType: "KEYRING", + * }, + * ], + * violationNotificationsEnabled: true, + * labels: { + * "label-one": "value-one", + * }, + * }); + * ``` * * ## Import * @@ -133,7 +170,7 @@ export class Workload extends pulumi.CustomResource { */ public readonly billingAccount!: pulumi.Output; /** - * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS */ public readonly complianceRegime!: pulumi.Output; /** @@ -196,13 +233,17 @@ export class Workload extends pulumi.CustomResource { */ public readonly organization!: pulumi.Output; /** - * Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM */ public readonly partner!: pulumi.Output; /** * Optional. 
Permissions granted to the AW Partner SA account for the customer workload */ public readonly partnerPermissions!: pulumi.Output; + /** + * Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + */ + public readonly partnerServicesBillingAccount!: pulumi.Output; /** * Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} */ @@ -258,6 +299,7 @@ export class Workload extends pulumi.CustomResource { resourceInputs["organization"] = state ? state.organization : undefined; resourceInputs["partner"] = state ? state.partner : undefined; resourceInputs["partnerPermissions"] = state ? state.partnerPermissions : undefined; + resourceInputs["partnerServicesBillingAccount"] = state ? state.partnerServicesBillingAccount : undefined; resourceInputs["provisionedResourcesParent"] = state ? state.provisionedResourcesParent : undefined; resourceInputs["pulumiLabels"] = state ? state.pulumiLabels : undefined; resourceInputs["resourceSettings"] = state ? state.resourceSettings : undefined; @@ -288,6 +330,7 @@ export class Workload extends pulumi.CustomResource { resourceInputs["organization"] = args ? args.organization : undefined; resourceInputs["partner"] = args ? args.partner : undefined; resourceInputs["partnerPermissions"] = args ? args.partnerPermissions : undefined; + resourceInputs["partnerServicesBillingAccount"] = args ? args.partnerServicesBillingAccount : undefined; resourceInputs["provisionedResourcesParent"] = args ? args.provisionedResourcesParent : undefined; resourceInputs["resourceSettings"] = args ? args.resourceSettings : undefined; resourceInputs["violationNotificationsEnabled"] = args ? args.violationNotificationsEnabled : undefined; @@ -318,7 +361,7 @@ export interface WorkloadState { */ billingAccount?: pulumi.Input; /** - * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS */ complianceRegime?: pulumi.Input; /** @@ -381,13 +424,17 @@ export interface WorkloadState { */ organization?: pulumi.Input; /** - * Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * Optional. Partner regime associated with this workload. 
Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM */ partner?: pulumi.Input; /** * Optional. Permissions granted to the AW Partner SA account for the customer workload */ partnerPermissions?: pulumi.Input; + /** + * Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + */ + partnerServicesBillingAccount?: pulumi.Input; /** * Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} */ @@ -423,7 +470,7 @@ export interface WorkloadArgs { */ billingAccount?: pulumi.Input; /** - * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + * Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS */ complianceRegime: pulumi.Input; /** @@ -458,13 +505,17 @@ export interface WorkloadArgs { */ organization: pulumi.Input; /** - * Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + * Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM */ partner?: pulumi.Input; /** * Optional. Permissions granted to the AW Partner SA account for the customer workload */ partnerPermissions?: pulumi.Input; + /** + * Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + */ + partnerServicesBillingAccount?: pulumi.Input; /** * Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. 
Format: folders/{folder_id} */ diff --git a/sdk/nodejs/backupdisasterrecovery/backupVault.ts b/sdk/nodejs/backupdisasterrecovery/backupVault.ts new file mode 100644 index 0000000000..fe7b1e8509 --- /dev/null +++ b/sdk/nodejs/backupdisasterrecovery/backupVault.ts @@ -0,0 +1,464 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as utilities from "../utilities"; + +/** + * ## Example Usage + * + * ### Backup Dr Backup Vault Full + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const backup_vault_test = new gcp.backupdisasterrecovery.BackupVault("backup-vault-test", { + * location: "us-central1", + * backupVaultId: "backup-vault-test", + * description: "This is a second backup vault built by Terraform.", + * backupMinimumEnforcedRetentionDuration: "100000s", + * labels: { + * foo: "bar1", + * bar: "baz1", + * }, + * annotations: { + * annotations1: "bar1", + * annotations2: "baz1", + * }, + * forceUpdate: true, + * forceDelete: true, + * allowMissing: true, + * }); + * ``` + * + * ## Import + * + * BackupVault can be imported using any of these accepted formats: + * + * * `projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}` + * + * * `{{project}}/{{location}}/{{backup_vault_id}}` + * + * * `{{location}}/{{backup_vault_id}}` + * + * When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: + * + * ```sh + * $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{project}}/{{location}}/{{backup_vault_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{location}}/{{backup_vault_id}} + * ``` + */ +export class BackupVault extends pulumi.CustomResource { + /** + * Get an existing BackupVault resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state Any extra arguments used during the lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, state?: BackupVaultState, opts?: pulumi.CustomResourceOptions): BackupVault { + return new BackupVault(name, state, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'gcp:backupdisasterrecovery/backupVault:BackupVault'; + + /** + * Returns true if the given object is an instance of BackupVault. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is BackupVault { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === BackupVault.__pulumiType; + } + + /** + * Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + */ + public readonly allowMissing!: pulumi.Output; + /** + * Optional. User annotations. 
See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. + */ + public readonly annotations!: pulumi.Output<{[key: string]: string} | undefined>; + /** + * Output only. The number of backups in this backup vault. + */ + public /*out*/ readonly backupCount!: pulumi.Output; + /** + * Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + */ + public readonly backupMinimumEnforcedRetentionDuration!: pulumi.Output; + /** + * Required. ID of the requesting object. + * + * + * - - - + */ + public readonly backupVaultId!: pulumi.Output; + /** + * Output only. The time when the instance was created. + */ + public /*out*/ readonly createTime!: pulumi.Output; + /** + * Output only. Set to true when there are no backups nested under this resource. + */ + public /*out*/ readonly deletable!: pulumi.Output; + /** + * Optional. The description of the BackupVault instance (2048 characters or less). + */ + public readonly description!: pulumi.Output; + public /*out*/ readonly effectiveAnnotations!: pulumi.Output<{[key: string]: string}>; + /** + * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + */ + public /*out*/ readonly effectiveLabels!: pulumi.Output<{[key: string]: string}>; + /** + * Optional. Time after which the BackupVault resource is locked. + */ + public readonly effectiveTime!: pulumi.Output; + /** + * Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwriting each other. + */ + public /*out*/ readonly etag!: pulumi.Output; + /** + * If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + */ + public readonly forceDelete!: pulumi.Output; + /** + * If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + */ + public readonly forceUpdate!: pulumi.Output; + /** + * Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effectiveLabels` for all of the labels present on the resource. + */ + public readonly labels!: pulumi.Output<{[key: string]: string} | undefined>; + /** + * The GCP location for the backup vault. + */ + public readonly location!: pulumi.Output; + /** + * Output only. Identifier. The resource name. + */ + public /*out*/ readonly name!: pulumi.Output; + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + */ + public readonly project!: pulumi.Output; + /** + * The combination of labels configured directly on the resource + * and default labels configured on the provider.
+ */ + public /*out*/ readonly pulumiLabels!: pulumi.Output<{[key: string]: string}>; + /** + * Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + */ + public /*out*/ readonly serviceAccount!: pulumi.Output; + /** + * Output only. The BackupVault resource instance state. + * Possible values: + * STATE_UNSPECIFIED + * CREATING + * ACTIVE + * DELETING + * ERROR + */ + public /*out*/ readonly state!: pulumi.Output; + /** + * Output only. Total size of the storage used by all backup resources. + */ + public /*out*/ readonly totalStoredBytes!: pulumi.Output; + /** + * Output only. Output only Immutable after resource creation until resource deletion. + */ + public /*out*/ readonly uid!: pulumi.Output; + /** + * Output only. The time when the instance was updated. + */ + public /*out*/ readonly updateTime!: pulumi.Output; + + /** + * Create a BackupVault resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: BackupVaultArgs, opts?: pulumi.CustomResourceOptions) + constructor(name: string, argsOrState?: BackupVaultArgs | BackupVaultState, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (opts.id) { + const state = argsOrState as BackupVaultState | undefined; + resourceInputs["allowMissing"] = state ? state.allowMissing : undefined; + resourceInputs["annotations"] = state ? state.annotations : undefined; + resourceInputs["backupCount"] = state ? state.backupCount : undefined; + resourceInputs["backupMinimumEnforcedRetentionDuration"] = state ? state.backupMinimumEnforcedRetentionDuration : undefined; + resourceInputs["backupVaultId"] = state ? state.backupVaultId : undefined; + resourceInputs["createTime"] = state ? state.createTime : undefined; + resourceInputs["deletable"] = state ? state.deletable : undefined; + resourceInputs["description"] = state ? state.description : undefined; + resourceInputs["effectiveAnnotations"] = state ? state.effectiveAnnotations : undefined; + resourceInputs["effectiveLabels"] = state ? state.effectiveLabels : undefined; + resourceInputs["effectiveTime"] = state ? state.effectiveTime : undefined; + resourceInputs["etag"] = state ? state.etag : undefined; + resourceInputs["forceDelete"] = state ? state.forceDelete : undefined; + resourceInputs["forceUpdate"] = state ? state.forceUpdate : undefined; + resourceInputs["labels"] = state ? state.labels : undefined; + resourceInputs["location"] = state ? state.location : undefined; + resourceInputs["name"] = state ? state.name : undefined; + resourceInputs["project"] = state ? state.project : undefined; + resourceInputs["pulumiLabels"] = state ? state.pulumiLabels : undefined; + resourceInputs["serviceAccount"] = state ? state.serviceAccount : undefined; + resourceInputs["state"] = state ? state.state : undefined; + resourceInputs["totalStoredBytes"] = state ? state.totalStoredBytes : undefined; + resourceInputs["uid"] = state ? state.uid : undefined; + resourceInputs["updateTime"] = state ? 
state.updateTime : undefined; + } else { + const args = argsOrState as BackupVaultArgs | undefined; + if ((!args || args.backupMinimumEnforcedRetentionDuration === undefined) && !opts.urn) { + throw new Error("Missing required property 'backupMinimumEnforcedRetentionDuration'"); + } + if ((!args || args.backupVaultId === undefined) && !opts.urn) { + throw new Error("Missing required property 'backupVaultId'"); + } + if ((!args || args.location === undefined) && !opts.urn) { + throw new Error("Missing required property 'location'"); + } + resourceInputs["allowMissing"] = args ? args.allowMissing : undefined; + resourceInputs["annotations"] = args ? args.annotations : undefined; + resourceInputs["backupMinimumEnforcedRetentionDuration"] = args ? args.backupMinimumEnforcedRetentionDuration : undefined; + resourceInputs["backupVaultId"] = args ? args.backupVaultId : undefined; + resourceInputs["description"] = args ? args.description : undefined; + resourceInputs["effectiveTime"] = args ? args.effectiveTime : undefined; + resourceInputs["forceDelete"] = args ? args.forceDelete : undefined; + resourceInputs["forceUpdate"] = args ? args.forceUpdate : undefined; + resourceInputs["labels"] = args ? args.labels : undefined; + resourceInputs["location"] = args ? args.location : undefined; + resourceInputs["project"] = args ? args.project : undefined; + resourceInputs["backupCount"] = undefined /*out*/; + resourceInputs["createTime"] = undefined /*out*/; + resourceInputs["deletable"] = undefined /*out*/; + resourceInputs["effectiveAnnotations"] = undefined /*out*/; + resourceInputs["effectiveLabels"] = undefined /*out*/; + resourceInputs["etag"] = undefined /*out*/; + resourceInputs["name"] = undefined /*out*/; + resourceInputs["pulumiLabels"] = undefined /*out*/; + resourceInputs["serviceAccount"] = undefined /*out*/; + resourceInputs["state"] = undefined /*out*/; + resourceInputs["totalStoredBytes"] = undefined /*out*/; + resourceInputs["uid"] = undefined /*out*/; + resourceInputs["updateTime"] = undefined /*out*/; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + const secretOpts = { additionalSecretOutputs: ["effectiveLabels", "pulumiLabels"] }; + opts = pulumi.mergeOptions(opts, secretOpts); + super(BackupVault.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * Input properties used for looking up and filtering BackupVault resources. + */ +export interface BackupVaultState { + /** + * Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + */ + allowMissing?: pulumi.Input; + /** + * Optional. User annotations. See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. + */ + annotations?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * Output only. The number of backups in this backup vault. + */ + backupCount?: pulumi.Input; + /** + * Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + */ + backupMinimumEnforcedRetentionDuration?: pulumi.Input; + /** + * Required. ID of the requesting object. + * + * + * - - - + */ + backupVaultId?: pulumi.Input; + /** + * Output only. The time when the instance was created. 
+ */ + createTime?: pulumi.Input; + /** + * Output only. Set to true when there are no backups nested under this resource. + */ + deletable?: pulumi.Input; + /** + * Optional. The description of the BackupVault instance (2048 characters or less). + */ + description?: pulumi.Input; + effectiveAnnotations?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + */ + effectiveLabels?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * Optional. Time after which the BackupVault resource is locked. + */ + effectiveTime?: pulumi.Input; + /** + * Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwriting each other. + */ + etag?: pulumi.Input; + /** + * If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + */ + forceDelete?: pulumi.Input; + /** + * If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + */ + forceUpdate?: pulumi.Input; + /** + * Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effectiveLabels` for all of the labels present on the resource. + */ + labels?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * The GCP location for the backup vault. + */ + location?: pulumi.Input; + /** + * Output only. Identifier. The resource name. + */ + name?: pulumi.Input; + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + */ + project?: pulumi.Input; + /** + * The combination of labels configured directly on the resource + * and default labels configured on the provider. + */ + pulumiLabels?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + */ + serviceAccount?: pulumi.Input; + /** + * Output only. The BackupVault resource instance state. + * Possible values: + * STATE_UNSPECIFIED + * CREATING + * ACTIVE + * DELETING + * ERROR + */ + state?: pulumi.Input; + /** + * Output only. Total size of the storage used by all backup resources. + */ + totalStoredBytes?: pulumi.Input; + /** + * Output only. Output only Immutable after resource creation until resource deletion. + */ + uid?: pulumi.Input; + /** + * Output only. The time when the instance was updated. + */ + updateTime?: pulumi.Input; +} + +/** + * The set of arguments for constructing a BackupVault resource. + */ +export interface BackupVaultArgs { + /** + * Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + */ + allowMissing?: pulumi.Input; + /** + * Optional. User annotations.
See https://google.aip.dev/128#annotations + * Stores small amounts of arbitrary data. + * **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + * Please refer to the field `effectiveAnnotations` for all of the annotations present on the resource. + */ + annotations?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + */ + backupMinimumEnforcedRetentionDuration: pulumi.Input; + /** + * Required. ID of the requesting object. + * + * + * - - - + */ + backupVaultId: pulumi.Input; + /** + * Optional. The description of the BackupVault instance (2048 characters or less). + */ + description?: pulumi.Input; + /** + * Optional. Time after which the BackupVault resource is locked. + */ + effectiveTime?: pulumi.Input; + /** + * If set, the following restrictions against deletion of the backup vault instance can be overridden: + * * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * * deletion of a backup vault instance that is being referenced by an active backup plan. + */ + forceDelete?: pulumi.Input; + /** + * If set, allow update to extend the minimum enforced retention for backup vault. This overrides + * the restriction against conflicting retention periods. This conflict may occur when the + * expiration schedule defined by the associated backup plan is shorter than the minimum + * retention set by the backup vault. + */ + forceUpdate?: pulumi.Input; + /** + * Optional. Resource labels to represent user provided metadata. + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field `effectiveLabels` for all of the labels present on the resource. + */ + labels?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * The GCP location for the backup vault. + */ + location: pulumi.Input; + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. 
+ */ + project?: pulumi.Input; +} diff --git a/sdk/nodejs/backupdisasterrecovery/index.ts b/sdk/nodejs/backupdisasterrecovery/index.ts index 85e0305b01..dd08a6051e 100644 --- a/sdk/nodejs/backupdisasterrecovery/index.ts +++ b/sdk/nodejs/backupdisasterrecovery/index.ts @@ -5,6 +5,11 @@ import * as pulumi from "@pulumi/pulumi"; import * as utilities from "../utilities"; // Export members: +export { BackupVaultArgs, BackupVaultState } from "./backupVault"; +export type BackupVault = import("./backupVault").BackupVault; +export const BackupVault: typeof import("./backupVault").BackupVault = null as any; +utilities.lazyLoad(exports, ["BackupVault"], () => require("./backupVault")); + export { GetManagementServerArgs, GetManagementServerResult, GetManagementServerOutputArgs } from "./getManagementServer"; export const getManagementServer: typeof import("./getManagementServer").getManagementServer = null as any; export const getManagementServerOutput: typeof import("./getManagementServer").getManagementServerOutput = null as any; @@ -20,6 +25,8 @@ const _module = { version: utilities.getVersion(), construct: (name: string, type: string, urn: string): pulumi.Resource => { switch (type) { + case "gcp:backupdisasterrecovery/backupVault:BackupVault": + return new BackupVault(name, undefined, { urn }) case "gcp:backupdisasterrecovery/managementServer:ManagementServer": return new ManagementServer(name, undefined, { urn }) default: @@ -27,4 +34,5 @@ const _module = { } }, }; +pulumi.runtime.registerResourceModule("gcp", "backupdisasterrecovery/backupVault", _module) pulumi.runtime.registerResourceModule("gcp", "backupdisasterrecovery/managementServer", _module) diff --git a/sdk/nodejs/bigquery/dataTransferConfig.ts b/sdk/nodejs/bigquery/dataTransferConfig.ts index 1046be49fc..fd4677ea84 100644 --- a/sdk/nodejs/bigquery/dataTransferConfig.ts +++ b/sdk/nodejs/bigquery/dataTransferConfig.ts @@ -53,6 +53,52 @@ import * as utilities from "../utilities"; * dependsOn: [permissions], * }); * ``` + * ### Bigquerydatatransfer Config Cmek + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const project = gcp.organizations.getProject({}); + * const permissions = new gcp.projects.IAMMember("permissions", { + * project: project.then(project => project.projectId), + * role: "roles/iam.serviceAccountTokenCreator", + * member: project.then(project => `serviceAccount:service-${project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com`), + * }); + * const myDataset = new gcp.bigquery.Dataset("my_dataset", { + * datasetId: "example_dataset", + * friendlyName: "foo", + * description: "bar", + * location: "asia-northeast1", + * }, { + * dependsOn: [permissions], + * }); + * const keyRing = new gcp.kms.KeyRing("key_ring", { + * name: "example-keyring", + * location: "us", + * }); + * const cryptoKey = new gcp.kms.CryptoKey("crypto_key", { + * name: "example-key", + * keyRing: keyRing.id, + * }); + * const queryConfigCmek = new gcp.bigquery.DataTransferConfig("query_config_cmek", { + * displayName: "", + * location: "asia-northeast1", + * dataSourceId: "scheduled_query", + * schedule: "first sunday of quarter 00:00", + * destinationDatasetId: myDataset.datasetId, + * params: { + * destination_table_name_template: "my_table", + * write_disposition: "WRITE_APPEND", + * query: "SELECT name FROM tabl WHERE x = 'y'", + * }, + * encryptionConfiguration: { + * kmsKeyName: cryptoKey.id, + * }, + * }, { + * dependsOn: [permissions], + * }); + * ``` * ### 
Bigquerydatatransfer Config Salesforce * * ```typescript @@ -74,9 +120,7 @@ import * as utilities from "../utilities"; * params: { * "connector.authentication.oauth.clientId": "client-id", * "connector.authentication.oauth.clientSecret": "client-secret", - * "connector.authentication.username": "username", - * "connector.authentication.password": "password", - * "connector.authentication.securityToken": "security-token", + * "connector.authentication.oauth.myDomain": "MyDomainName", * assets: "[\"asset-a\",\"asset-b\"]", * }, * }); @@ -152,6 +196,11 @@ export class DataTransferConfig extends pulumi.CustomResource { * Structure is documented below. */ public readonly emailPreferences!: pulumi.Output; + /** + * Represents the encryption configuration for a transfer. + * Structure is documented below. + */ + public readonly encryptionConfiguration!: pulumi.Output; /** * The geographic location where the transfer config should reside. * Examples: US, EU, asia-northeast1. The default value is US. @@ -238,6 +287,7 @@ export class DataTransferConfig extends pulumi.CustomResource { resourceInputs["disabled"] = state ? state.disabled : undefined; resourceInputs["displayName"] = state ? state.displayName : undefined; resourceInputs["emailPreferences"] = state ? state.emailPreferences : undefined; + resourceInputs["encryptionConfiguration"] = state ? state.encryptionConfiguration : undefined; resourceInputs["location"] = state ? state.location : undefined; resourceInputs["name"] = state ? state.name : undefined; resourceInputs["notificationPubsubTopic"] = state ? state.notificationPubsubTopic : undefined; @@ -264,6 +314,7 @@ export class DataTransferConfig extends pulumi.CustomResource { resourceInputs["disabled"] = args ? args.disabled : undefined; resourceInputs["displayName"] = args ? args.displayName : undefined; resourceInputs["emailPreferences"] = args ? args.emailPreferences : undefined; + resourceInputs["encryptionConfiguration"] = args ? args.encryptionConfiguration : undefined; resourceInputs["location"] = args ? args.location : undefined; resourceInputs["notificationPubsubTopic"] = args ? args.notificationPubsubTopic : undefined; resourceInputs["params"] = args ? args.params : undefined; @@ -313,6 +364,11 @@ export interface DataTransferConfigState { * Structure is documented below. */ emailPreferences?: pulumi.Input; + /** + * Represents the encryption configuration for a transfer. + * Structure is documented below. + */ + encryptionConfiguration?: pulumi.Input; /** * The geographic location where the transfer config should reside. * Examples: US, EU, asia-northeast1. The default value is US. @@ -415,6 +471,11 @@ export interface DataTransferConfigArgs { * Structure is documented below. */ emailPreferences?: pulumi.Input; + /** + * Represents the encryption configuration for a transfer. + * Structure is documented below. + */ + encryptionConfiguration?: pulumi.Input; /** * The geographic location where the transfer config should reside. * Examples: US, EU, asia-northeast1. The default value is US. diff --git a/sdk/nodejs/bigqueryanalyticshub/dataExchange.ts b/sdk/nodejs/bigqueryanalyticshub/dataExchange.ts index 1c3e42c87f..ebbbe9ae71 100644 --- a/sdk/nodejs/bigqueryanalyticshub/dataExchange.ts +++ b/sdk/nodejs/bigqueryanalyticshub/dataExchange.ts @@ -2,6 +2,8 @@ // *** Do not edit by hand unless you're certain you know what you are doing! 
*** import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../types/input"; +import * as outputs from "../types/output"; import * as utilities from "../utilities"; /** @@ -28,6 +30,22 @@ import * as utilities from "../utilities"; * description: "example data exchange", * }); * ``` + * ### Bigquery Analyticshub Data Exchange Dcr + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const dataExchange = new gcp.bigqueryanalyticshub.DataExchange("data_exchange", { + * location: "US", + * dataExchangeId: "dcr_data_exchange", + * displayName: "dcr_data_exchange", + * description: "example dcr data exchange", + * sharingEnvironmentConfig: { + * dcrExchangeConfig: {}, + * }, + * }); + * ``` * * ## Import * @@ -132,6 +150,12 @@ export class DataExchange extends pulumi.CustomResource { * If it is not provided, the provider project is used. */ public readonly project!: pulumi.Output; + /** + * Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + */ + public readonly sharingEnvironmentConfig!: pulumi.Output; /** * Create a DataExchange resource with the given unique name, arguments, and options. @@ -156,6 +180,7 @@ export class DataExchange extends pulumi.CustomResource { resourceInputs["name"] = state ? state.name : undefined; resourceInputs["primaryContact"] = state ? state.primaryContact : undefined; resourceInputs["project"] = state ? state.project : undefined; + resourceInputs["sharingEnvironmentConfig"] = state ? state.sharingEnvironmentConfig : undefined; } else { const args = argsOrState as DataExchangeArgs | undefined; if ((!args || args.dataExchangeId === undefined) && !opts.urn) { @@ -175,6 +200,7 @@ export class DataExchange extends pulumi.CustomResource { resourceInputs["location"] = args ? args.location : undefined; resourceInputs["primaryContact"] = args ? args.primaryContact : undefined; resourceInputs["project"] = args ? args.project : undefined; + resourceInputs["sharingEnvironmentConfig"] = args ? args.sharingEnvironmentConfig : undefined; resourceInputs["listingCount"] = undefined /*out*/; resourceInputs["name"] = undefined /*out*/; } @@ -232,6 +258,12 @@ export interface DataExchangeState { * If it is not provided, the provider project is used. */ project?: pulumi.Input; + /** + * Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. + */ + sharingEnvironmentConfig?: pulumi.Input; } /** @@ -274,4 +306,10 @@ export interface DataExchangeArgs { * If it is not provided, the provider project is used. */ project?: pulumi.Input; + /** + * Configurable data sharing environment option for a data exchange. + * This field is required for data clean room exchanges. + * Structure is documented below. 
+ */ + sharingEnvironmentConfig?: pulumi.Input; } diff --git a/sdk/nodejs/bigqueryanalyticshub/listing.ts b/sdk/nodejs/bigqueryanalyticshub/listing.ts index 1598a5274a..4c666dc0c3 100644 --- a/sdk/nodejs/bigqueryanalyticshub/listing.ts +++ b/sdk/nodejs/bigqueryanalyticshub/listing.ts @@ -79,6 +79,67 @@ import * as utilities from "../utilities"; * }, * }); * ``` + * ### Bigquery Analyticshub Listing Dcr + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const listing = new gcp.bigqueryanalyticshub.DataExchange("listing", { + * location: "US", + * dataExchangeId: "dcr_data_exchange", + * displayName: "dcr_data_exchange", + * description: "example dcr data exchange", + * sharingEnvironmentConfig: { + * dcrExchangeConfig: {}, + * }, + * }); + * const listingDataset = new gcp.bigquery.Dataset("listing", { + * datasetId: "dcr_listing", + * friendlyName: "dcr_listing", + * description: "example dcr data exchange", + * location: "US", + * }); + * const listingTable = new gcp.bigquery.Table("listing", { + * deletionProtection: false, + * tableId: "dcr_listing", + * datasetId: listingDataset.datasetId, + * schema: `[ + * { + * "name": "name", + * "type": "STRING", + * "mode": "NULLABLE" + * }, + * { + * "name": "post_abbr", + * "type": "STRING", + * "mode": "NULLABLE" + * }, + * { + * "name": "date", + * "type": "DATE", + * "mode": "NULLABLE" + * } + * ] + * `, + * }); + * const listingListing = new gcp.bigqueryanalyticshub.Listing("listing", { + * location: "US", + * dataExchangeId: listing.dataExchangeId, + * listingId: "dcr_listing", + * displayName: "dcr_listing", + * description: "example dcr data exchange", + * bigqueryDataset: { + * dataset: listingDataset.id, + * selectedResources: [{ + * table: listingTable.id, + * }], + * }, + * restrictedExportConfig: { + * enabled: true, + * }, + * }); + * ``` * * ## Import * diff --git a/sdk/nodejs/bigtable/table.ts b/sdk/nodejs/bigtable/table.ts index aa02f800dd..a8fc5c7785 100644 --- a/sdk/nodejs/bigtable/table.ts +++ b/sdk/nodejs/bigtable/table.ts @@ -40,6 +40,23 @@ import * as utilities from "../utilities"; * }, * { * family: "family-second", + * type: "intsum", + * }, + * { + * family: "family-third", + * type: ` { + * \x09\x09\x09\x09\x09"aggregateType": { + * \x09\x09\x09\x09\x09\x09"max": {}, + * \x09\x09\x09\x09\x09\x09"inputType": { + * \x09\x09\x09\x09\x09\x09\x09"int64Type": { + * \x09\x09\x09\x09\x09\x09\x09\x09"encoding": { + * \x09\x09\x09\x09\x09\x09\x09\x09\x09"bigEndianBytes": {} + * \x09\x09\x09\x09\x09\x09\x09\x09} + * \x09\x09\x09\x09\x09\x09\x09} + * \x09\x09\x09\x09\x09\x09} + * \x09\x09\x09\x09\x09} + * \x09\x09\x09\x09} + * `, * }, * ], * changeStreamRetention: "24h0m0s", diff --git a/sdk/nodejs/certificateauthority/authority.ts b/sdk/nodejs/certificateauthority/authority.ts index a821d9d578..8d67167c6b 100644 --- a/sdk/nodejs/certificateauthority/authority.ts +++ b/sdk/nodejs/certificateauthority/authority.ts @@ -365,7 +365,8 @@ export class Authority extends pulumi.CustomResource { public /*out*/ readonly createTime!: pulumi.Output; public readonly deletionProtection!: pulumi.Output; /** - * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. 
*/ public readonly desiredState!: pulumi.Output; /** @@ -573,7 +574,8 @@ export interface AuthorityState { createTime?: pulumi.Input; deletionProtection?: pulumi.Input; /** - * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. */ desiredState?: pulumi.Input; /** @@ -685,7 +687,8 @@ export interface AuthorityArgs { config: pulumi.Input; deletionProtection?: pulumi.Input; /** - * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + * Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + * ENABLED, DISABLED, STAGED. */ desiredState?: pulumi.Input; /** diff --git a/sdk/nodejs/certificatemanager/certificate.ts b/sdk/nodejs/certificatemanager/certificate.ts index 10968d9ea3..95ef0a8430 100644 --- a/sdk/nodejs/certificatemanager/certificate.ts +++ b/sdk/nodejs/certificatemanager/certificate.ts @@ -394,6 +394,10 @@ export class Certificate extends pulumi.CustomResource { * and default labels configured on the provider. */ public /*out*/ readonly pulumiLabels!: pulumi.Output<{[key: string]: string}>; + /** + * The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + */ + public /*out*/ readonly sanDnsnames!: pulumi.Output; /** * The scope of the certificate. * DEFAULT: Certificates with default scope are served from core Google data centers. @@ -433,6 +437,7 @@ export class Certificate extends pulumi.CustomResource { resourceInputs["name"] = state ? state.name : undefined; resourceInputs["project"] = state ? state.project : undefined; resourceInputs["pulumiLabels"] = state ? state.pulumiLabels : undefined; + resourceInputs["sanDnsnames"] = state ? state.sanDnsnames : undefined; resourceInputs["scope"] = state ? state.scope : undefined; resourceInputs["selfManaged"] = state ? state.selfManaged : undefined; } else { @@ -447,6 +452,7 @@ export class Certificate extends pulumi.CustomResource { resourceInputs["selfManaged"] = args ? args.selfManaged : undefined; resourceInputs["effectiveLabels"] = undefined /*out*/; resourceInputs["pulumiLabels"] = undefined /*out*/; + resourceInputs["sanDnsnames"] = undefined /*out*/; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); const secretOpts = { additionalSecretOutputs: ["effectiveLabels", "pulumiLabels"] }; @@ -503,6 +509,10 @@ export interface CertificateState { * and default labels configured on the provider. */ pulumiLabels?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + */ + sanDnsnames?: pulumi.Input[]>; /** * The scope of the certificate. * DEFAULT: Certificates with default scope are served from core Google data centers. diff --git a/sdk/nodejs/certificatemanager/getCertificates.ts b/sdk/nodejs/certificatemanager/getCertificates.ts new file mode 100644 index 0000000000..0cdbad25ab --- /dev/null +++ b/sdk/nodejs/certificatemanager/getCertificates.ts @@ -0,0 +1,107 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! 
*** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../types/input"; +import * as outputs from "../types/output"; +import * as utilities from "../utilities"; + +/** + * List all certificates within Google Certificate Manager for a given project, region or filter. + * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const default = gcp.certificatemanager.getCertificates({}); + * ``` + * + * ### With A Filter + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const default = gcp.certificatemanager.getCertificates({ + * filter: "name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*", + * }); + * ``` + */ +export function getCertificates(args?: GetCertificatesArgs, opts?: pulumi.InvokeOptions): Promise { + args = args || {}; + + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {}); + return pulumi.runtime.invoke("gcp:certificatemanager/getCertificates:getCertificates", { + "filter": args.filter, + "region": args.region, + }, opts); +} + +/** + * A collection of arguments for invoking getCertificates. + */ +export interface GetCertificatesArgs { + /** + * Filter expression to restrict the certificates returned. + */ + filter?: string; + /** + * The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + */ + region?: string; +} + +/** + * A collection of values returned by getCertificates. + */ +export interface GetCertificatesResult { + readonly certificates: outputs.certificatemanager.GetCertificatesCertificate[]; + readonly filter?: string; + /** + * The provider-assigned unique ID for this managed resource. + */ + readonly id: string; + readonly region?: string; +} +/** + * List all certificates within Google Certificate Manager for a given project, region or filter. + * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const default = gcp.certificatemanager.getCertificates({}); + * ``` + * + * ### With A Filter + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const default = gcp.certificatemanager.getCertificates({ + * filter: "name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*", + * }); + * ``` + */ +export function getCertificatesOutput(args?: GetCertificatesOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { + return pulumi.output(args).apply((a: any) => getCertificates(a, opts)) +} + +/** + * A collection of arguments for invoking getCertificates. + */ +export interface GetCertificatesOutputArgs { + /** + * Filter expression to restrict the certificates returned. + */ + filter?: pulumi.Input; + /** + * The region in which the resource belongs. If it is not provided, `GLOBAL` is used. 
+ */ + region?: pulumi.Input; +} diff --git a/sdk/nodejs/certificatemanager/index.ts b/sdk/nodejs/certificatemanager/index.ts index a25cf6e79a..29a3210959 100644 --- a/sdk/nodejs/certificatemanager/index.ts +++ b/sdk/nodejs/certificatemanager/index.ts @@ -35,6 +35,11 @@ export const getCertificateMap: typeof import("./getCertificateMap").getCertific export const getCertificateMapOutput: typeof import("./getCertificateMap").getCertificateMapOutput = null as any; utilities.lazyLoad(exports, ["getCertificateMap","getCertificateMapOutput"], () => require("./getCertificateMap")); +export { GetCertificatesArgs, GetCertificatesResult, GetCertificatesOutputArgs } from "./getCertificates"; +export const getCertificates: typeof import("./getCertificates").getCertificates = null as any; +export const getCertificatesOutput: typeof import("./getCertificates").getCertificatesOutput = null as any; +utilities.lazyLoad(exports, ["getCertificates","getCertificatesOutput"], () => require("./getCertificates")); + export { TrustConfigArgs, TrustConfigState } from "./trustConfig"; export type TrustConfig = import("./trustConfig").TrustConfig; export const TrustConfig: typeof import("./trustConfig").TrustConfig = null as any; diff --git a/sdk/nodejs/cloudrunv2/service.ts b/sdk/nodejs/cloudrunv2/service.ts index 04d7993e66..a0e759c1b7 100644 --- a/sdk/nodejs/cloudrunv2/service.ts +++ b/sdk/nodejs/cloudrunv2/service.ts @@ -356,7 +356,6 @@ import * as utilities from "../utilities"; * name: "cloudrun-service", * location: "us-central1", * deletionProtection: false, - * launchStage: "BETA", * template: { * executionEnvironment: "EXECUTION_ENVIRONMENT_GEN2", * containers: [{ @@ -400,7 +399,6 @@ import * as utilities from "../utilities"; * location: "us-central1", * deletionProtection: false, * ingress: "INGRESS_TRAFFIC_ALL", - * launchStage: "BETA", * template: { * executionEnvironment: "EXECUTION_ENVIRONMENT_GEN2", * containers: [{ @@ -427,6 +425,34 @@ import * as utilities from "../utilities"; * }, * }); * ``` + * ### Cloudrunv2 Service Mesh + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * import * as time from "@pulumi/time"; + * + * const mesh = new gcp.networkservices.Mesh("mesh", {name: "network-services-mesh"}); + * const waitForMesh = new time.index.Sleep("wait_for_mesh", {createDuration: "1m"}, { + * dependsOn: [mesh], + * }); + * const _default = new gcp.cloudrunv2.Service("default", { + * name: "cloudrun-service", + * deletionProtection: false, + * location: "us-central1", + * launchStage: "BETA", + * template: { + * containers: [{ + * image: "us-docker.pkg.dev/cloudrun/container/hello", + * }], + * serviceMesh: { + * mesh: mesh.id, + * }, + * }, + * }, { + * dependsOn: [waitForMesh], + * }); + * ``` * * ## Import * diff --git a/sdk/nodejs/cloudtasks/queue.ts b/sdk/nodejs/cloudtasks/queue.ts index b1a9365729..3eeacccb89 100644 --- a/sdk/nodejs/cloudtasks/queue.ts +++ b/sdk/nodejs/cloudtasks/queue.ts @@ -52,6 +52,102 @@ import * as utilities from "../utilities"; * }, * }); * ``` + * ### Cloud Tasks Queue Http Target Oidc + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const oidcServiceAccount = new gcp.serviceaccount.Account("oidc_service_account", { + * accountId: "example-oidc", + * displayName: "Tasks Queue OIDC Service Account", + * }); + * const httpTargetOidc = new gcp.cloudtasks.Queue("http_target_oidc", { + * name: "cloud-tasks-queue-http-target-oidc", + * location: 
"us-central1", + * httpTarget: { + * httpMethod: "POST", + * uriOverride: { + * scheme: "HTTPS", + * host: "oidc.example.com", + * port: "8443", + * pathOverride: { + * path: "/users/1234", + * }, + * queryOverride: { + * queryParams: "qparam1=123&qparam2=456", + * }, + * uriOverrideEnforceMode: "IF_NOT_EXISTS", + * }, + * headerOverrides: [ + * { + * header: { + * key: "AddSomethingElse", + * value: "MyOtherValue", + * }, + * }, + * { + * header: { + * key: "AddMe", + * value: "MyValue", + * }, + * }, + * ], + * oidcToken: { + * serviceAccountEmail: oidcServiceAccount.email, + * audience: "https://oidc.example.com", + * }, + * }, + * }); + * ``` + * ### Cloud Tasks Queue Http Target Oauth + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const oauthServiceAccount = new gcp.serviceaccount.Account("oauth_service_account", { + * accountId: "example-oauth", + * displayName: "Tasks Queue OAuth Service Account", + * }); + * const httpTargetOauth = new gcp.cloudtasks.Queue("http_target_oauth", { + * name: "cloud-tasks-queue-http-target-oauth", + * location: "us-central1", + * httpTarget: { + * httpMethod: "POST", + * uriOverride: { + * scheme: "HTTPS", + * host: "oauth.example.com", + * port: "8443", + * pathOverride: { + * path: "/users/1234", + * }, + * queryOverride: { + * queryParams: "qparam1=123&qparam2=456", + * }, + * uriOverrideEnforceMode: "IF_NOT_EXISTS", + * }, + * headerOverrides: [ + * { + * header: { + * key: "AddSomethingElse", + * value: "MyOtherValue", + * }, + * }, + * { + * header: { + * key: "AddMe", + * value: "MyValue", + * }, + * }, + * ], + * oauthToken: { + * serviceAccountEmail: oauthServiceAccount.email, + * scope: "openid https://www.googleapis.com/auth/userinfo.email", + * }, + * }, + * }); + * ``` * * ## Import * @@ -111,6 +207,11 @@ export class Queue extends pulumi.CustomResource { * Structure is documented below. */ public readonly appEngineRoutingOverride!: pulumi.Output; + /** + * Modifies HTTP target for HTTP tasks. + * Structure is documented below. + */ + public readonly httpTarget!: pulumi.Output; /** * The location of the queue * @@ -163,6 +264,7 @@ export class Queue extends pulumi.CustomResource { if (opts.id) { const state = argsOrState as QueueState | undefined; resourceInputs["appEngineRoutingOverride"] = state ? state.appEngineRoutingOverride : undefined; + resourceInputs["httpTarget"] = state ? state.httpTarget : undefined; resourceInputs["location"] = state ? state.location : undefined; resourceInputs["name"] = state ? state.name : undefined; resourceInputs["project"] = state ? state.project : undefined; @@ -175,6 +277,7 @@ export class Queue extends pulumi.CustomResource { throw new Error("Missing required property 'location'"); } resourceInputs["appEngineRoutingOverride"] = args ? args.appEngineRoutingOverride : undefined; + resourceInputs["httpTarget"] = args ? args.httpTarget : undefined; resourceInputs["location"] = args ? args.location : undefined; resourceInputs["name"] = args ? args.name : undefined; resourceInputs["project"] = args ? args.project : undefined; @@ -197,6 +300,11 @@ export interface QueueState { * Structure is documented below. */ appEngineRoutingOverride?: pulumi.Input; + /** + * Modifies HTTP target for HTTP tasks. + * Structure is documented below. + */ + httpTarget?: pulumi.Input; /** * The location of the queue * @@ -246,6 +354,11 @@ export interface QueueArgs { * Structure is documented below. 
*/ appEngineRoutingOverride?: pulumi.Input; + /** + * Modifies HTTP target for HTTP tasks. + * Structure is documented below. + */ + httpTarget?: pulumi.Input; /** * The location of the queue * diff --git a/sdk/nodejs/compute/getInstance.ts b/sdk/nodejs/compute/getInstance.ts index 53045fd862..77c5528a2a 100644 --- a/sdk/nodejs/compute/getInstance.ts +++ b/sdk/nodejs/compute/getInstance.ts @@ -86,7 +86,7 @@ export interface GetInstanceResult { */ readonly cpuPlatform: string; /** - * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). */ readonly currentStatus: string; /** diff --git a/sdk/nodejs/compute/healthCheck.ts b/sdk/nodejs/compute/healthCheck.ts index 5211b4b5fb..22da083cf8 100644 --- a/sdk/nodejs/compute/healthCheck.ts +++ b/sdk/nodejs/compute/healthCheck.ts @@ -269,6 +269,66 @@ import * as utilities from "../utilities"; * }, * }); * ``` + * ### Compute Health Check Http Source Regions + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const http_health_check_with_source_regions = new gcp.compute.HealthCheck("http-health-check-with-source-regions", { + * name: "http-health-check", + * checkIntervalSec: 30, + * httpHealthCheck: { + * port: 80, + * portSpecification: "USE_FIXED_PORT", + * }, + * sourceRegions: [ + * "us-west1", + * "us-central1", + * "us-east5", + * ], + * }); + * ``` + * ### Compute Health Check Https Source Regions + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const https_health_check_with_source_regions = new gcp.compute.HealthCheck("https-health-check-with-source-regions", { + * name: "https-health-check", + * checkIntervalSec: 30, + * httpsHealthCheck: { + * port: 80, + * portSpecification: "USE_FIXED_PORT", + * }, + * sourceRegions: [ + * "us-west1", + * "us-central1", + * "us-east5", + * ], + * }); + * ``` + * ### Compute Health Check Tcp Source Regions + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const tcp_health_check_with_source_regions = new gcp.compute.HealthCheck("tcp-health-check-with-source-regions", { + * name: "tcp-health-check", + * checkIntervalSec: 30, + * tcpHealthCheck: { + * port: 80, + * portSpecification: "USE_FIXED_PORT", + * }, + * sourceRegions: [ + * "us-west1", + * "us-central1", + * "us-east5", + * ], + * }); + * ``` * * ## Import * diff --git a/sdk/nodejs/compute/instance.ts b/sdk/nodejs/compute/instance.ts index 6efe086961..bd870ff731 100644 --- a/sdk/nodejs/compute/instance.ts +++ b/sdk/nodejs/compute/instance.ts @@ -56,6 +56,49 @@ import * as utilities from "../utilities"; * }); * ``` * + * ### Confidential Computing + * + * Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. 
+ * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const _default = new gcp.serviceaccount.Account("default", { + * accountId: "my-custom-sa", + * displayName: "Custom SA for VM Instance", + * }); + * const confidentialInstance = new gcp.compute.Instance("confidential_instance", { + * networkInterfaces: [{ + * accessConfigs: [{}], + * network: "default", + * }], + * name: "my-confidential-instance", + * zone: "us-central1-a", + * machineType: "n2d-standard-2", + * minCpuPlatform: "AMD Milan", + * confidentialInstanceConfig: { + * enableConfidentialCompute: true, + * confidentialInstanceType: "SEV", + * }, + * bootDisk: { + * initializeParams: { + * image: "ubuntu-os-cloud/ubuntu-2004-lts", + * labels: { + * my_label: "value", + * }, + * }, + * }, + * scratchDisks: [{ + * "interface": "NVME", + * }], + * serviceAccount: { + * email: _default.email, + * scopes: ["cloud-platform"], + * }, + * }); + * ``` + * * ## Import * * Instances can be imported using any of these accepted formats: @@ -141,7 +184,7 @@ export class Instance extends pulumi.CustomResource { */ public /*out*/ readonly cpuPlatform!: pulumi.Output; /** - * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). */ public /*out*/ readonly currentStatus!: pulumi.Output; /** @@ -477,7 +520,7 @@ export interface InstanceState { */ cpuPlatform?: pulumi.Input; /** - * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + * The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). */ currentStatus?: pulumi.Input; /** diff --git a/sdk/nodejs/compute/instanceTemplate.ts b/sdk/nodejs/compute/instanceTemplate.ts index 3dbe6f7c75..c553e3e8e6 100644 --- a/sdk/nodejs/compute/instanceTemplate.ts +++ b/sdk/nodejs/compute/instanceTemplate.ts @@ -164,6 +164,41 @@ import * as utilities from "../utilities"; * }); * ``` * + * ### Confidential Computing + * + * Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. 
+ * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const _default = new gcp.serviceaccount.Account("default", { + * accountId: "my-custom-sa", + * displayName: "Custom SA for VM Instance", + * }); + * const confidentialInstanceTemplate = new gcp.compute.InstanceTemplate("confidential_instance_template", { + * networkInterfaces: [{ + * accessConfigs: [{}], + * network: "default", + * }], + * name: "my-confidential-instance-template", + * region: "us-central1", + * machineType: "n2d-standard-2", + * minCpuPlatform: "AMD Milan", + * confidentialInstanceConfig: { + * enableConfidentialCompute: true, + * confidentialInstanceType: "SEV", + * }, + * disks: [{ + * sourceImage: "ubuntu-os-cloud/ubuntu-2004-lts", + * }], + * serviceAccount: { + * email: _default.email, + * scopes: ["cloud-platform"], + * }, + * }); + * ``` + * * ## Deploying the Latest Image * * A common way to use instance templates and managed instance groups is to deploy the diff --git a/sdk/nodejs/compute/interconnect.ts b/sdk/nodejs/compute/interconnect.ts index 86e2cea989..2d0641a580 100644 --- a/sdk/nodejs/compute/interconnect.ts +++ b/sdk/nodejs/compute/interconnect.ts @@ -243,11 +243,12 @@ export class Interconnect extends pulumi.CustomResource { */ public readonly remoteLocation!: pulumi.Output; /** - * interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. */ public readonly requestedFeatures!: pulumi.Output; /** @@ -528,11 +529,12 @@ export interface InterconnectState { */ remoteLocation?: pulumi.Input; /** - * interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. */ requestedFeatures?: pulumi.Input[]>; /** @@ -644,11 +646,12 @@ export interface InterconnectArgs { */ remoteLocation?: pulumi.Input; /** - * interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + * interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( * If specified then the connection is created on MACsec capable hardware ports. If not * specified, the default value is false, which allocates non-MACsec capable ports first if - * available). - * Each value may be one of: `MACSEC`. + * available). 
Note that MACSEC is still technically allowed for compatibility reasons, but it + * does not work with the API, and will be removed in an upcoming major version. + * Each value may be one of: `MACSEC`, `IF_MACSEC`. */ requestedFeatures?: pulumi.Input[]>; /** diff --git a/sdk/nodejs/compute/nodeTemplate.ts b/sdk/nodejs/compute/nodeTemplate.ts index 336b5b70b6..2d36e9fb3b 100644 --- a/sdk/nodejs/compute/nodeTemplate.ts +++ b/sdk/nodejs/compute/nodeTemplate.ts @@ -52,6 +52,25 @@ import * as utilities from "../utilities"; * }, * }); * ``` + * ### Node Template Accelerators + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const central1a = gcp.compute.getNodeTypes({ + * zone: "us-central1-a", + * }); + * const template = new gcp.compute.NodeTemplate("template", { + * name: "soletenant-with-accelerators", + * region: "us-central1", + * nodeType: "n1-node-96-624", + * accelerators: [{ + * acceleratorType: "nvidia-tesla-t4", + * acceleratorCount: 4, + * }], + * }); + * ``` * * ## Import * @@ -111,6 +130,12 @@ export class NodeTemplate extends pulumi.CustomResource { return obj['__pulumiType'] === NodeTemplate.__pulumiType; } + /** + * List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + */ + public readonly accelerators!: pulumi.Output; /** * CPU overcommit. * Default value is `NONE`. @@ -181,6 +206,7 @@ export class NodeTemplate extends pulumi.CustomResource { opts = opts || {}; if (opts.id) { const state = argsOrState as NodeTemplateState | undefined; + resourceInputs["accelerators"] = state ? state.accelerators : undefined; resourceInputs["cpuOvercommitType"] = state ? state.cpuOvercommitType : undefined; resourceInputs["creationTimestamp"] = state ? state.creationTimestamp : undefined; resourceInputs["description"] = state ? state.description : undefined; @@ -194,6 +220,7 @@ export class NodeTemplate extends pulumi.CustomResource { resourceInputs["serverBinding"] = state ? state.serverBinding : undefined; } else { const args = argsOrState as NodeTemplateArgs | undefined; + resourceInputs["accelerators"] = args ? args.accelerators : undefined; resourceInputs["cpuOvercommitType"] = args ? args.cpuOvercommitType : undefined; resourceInputs["description"] = args ? args.description : undefined; resourceInputs["name"] = args ? args.name : undefined; @@ -215,6 +242,12 @@ export class NodeTemplate extends pulumi.CustomResource { * Input properties used for looking up and filtering NodeTemplate resources. */ export interface NodeTemplateState { + /** + * List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + */ + accelerators?: pulumi.Input[]>; /** * CPU overcommit. * Default value is `NONE`. @@ -277,6 +310,12 @@ export interface NodeTemplateState { * The set of arguments for constructing a NodeTemplate resource. */ export interface NodeTemplateArgs { + /** + * List of the type and count of accelerator cards attached to the + * node template + * Structure is documented below. + */ + accelerators?: pulumi.Input[]>; /** * CPU overcommit. * Default value is `NONE`. 
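The updated `requestedFeatures` documentation above recommends `IF_MACSEC` over the legacy `MACSEC` value but carries no example in this section. The sketch below is illustrative only: the resource name, customer name, interconnect location URL, and link settings are hypothetical placeholders for a Dedicated Interconnect order, not values taken from this change.

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Placeholder order details for a Dedicated Interconnect; adjust to a real
// interconnect location and link configuration before use.
const macsecInterconnect = new gcp.compute.Interconnect("macsec_interconnect", {
    name: "example-macsec-interconnect",
    customerName: "example-customer",
    interconnectType: "DEDICATED",
    linkType: "LINK_TYPE_ETHERNET_10G_LR",
    requestedLinkCount: 1,
    location: "https://www.googleapis.com/compute/v1/projects/example-project/global/interconnectLocations/iad-zone1-1",
    // Prefer IF_MACSEC; MACSEC remains only for compatibility and is slated
    // for removal in an upcoming major version.
    requestedFeatures: ["IF_MACSEC"],
});
```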
diff --git a/sdk/nodejs/compute/targetHttpsProxy.ts b/sdk/nodejs/compute/targetHttpsProxy.ts index 46d94f2e25..d35529770a 100644 --- a/sdk/nodejs/compute/targetHttpsProxy.ts +++ b/sdk/nodejs/compute/targetHttpsProxy.ts @@ -387,6 +387,10 @@ export class TargetHttpsProxy extends pulumi.CustomResource { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. */ public readonly serverTlsPolicy!: pulumi.Output; /** @@ -557,6 +561,10 @@ export interface TargetHttpsProxyState { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. */ serverTlsPolicy?: pulumi.Input; /** @@ -660,6 +668,10 @@ export interface TargetHttpsProxyArgs { * INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED * loadBalancingScheme consult ServerTlsPolicy documentation. * If left blank, communications are not encrypted. + * If you remove this field from your configuration at the same time as + * deleting or recreating a referenced ServerTlsPolicy resource, you will + * receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + * within the ServerTlsPolicy resource to avoid this. */ serverTlsPolicy?: pulumi.Input; /** diff --git a/sdk/nodejs/container/attachedCluster.ts b/sdk/nodejs/container/attachedCluster.ts index 0eceb8ffef..c191e3168c 100644 --- a/sdk/nodejs/container/attachedCluster.ts +++ b/sdk/nodejs/container/attachedCluster.ts @@ -217,7 +217,7 @@ export class AttachedCluster extends pulumi.CustomResource { */ public /*out*/ readonly createTime!: pulumi.Output; /** - * Policy to determine what flags to send on delete. + * Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS */ public readonly deletionPolicy!: pulumi.Output; /** @@ -425,7 +425,7 @@ export interface AttachedClusterState { */ createTime?: pulumi.Input; /** - * Policy to determine what flags to send on delete. + * Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS */ deletionPolicy?: pulumi.Input; /** @@ -538,7 +538,7 @@ export interface AttachedClusterArgs { */ binaryAuthorization?: pulumi.Input; /** - * Policy to determine what flags to send on delete. + * Policy to determine what flags to send on delete. 
Possible values: DELETE, DELETE_IGNORE_ERRORS */ deletionPolicy?: pulumi.Input; /** diff --git a/sdk/nodejs/databasemigrationservice/connectionProfile.ts b/sdk/nodejs/databasemigrationservice/connectionProfile.ts index 5f6505f33e..1c4dff21d0 100644 --- a/sdk/nodejs/databasemigrationservice/connectionProfile.ts +++ b/sdk/nodejs/databasemigrationservice/connectionProfile.ts @@ -228,6 +228,122 @@ import * as utilities from "../utilities"; * dependsOn: [vpcConnection], * }); * ``` + * ### Database Migration Service Connection Profile Existing Mysql + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const project = gcp.organizations.getProject({}); + * const destinationCsql = new gcp.sql.DatabaseInstance("destination_csql", { + * name: "destination-csql", + * databaseVersion: "MYSQL_5_7", + * settings: { + * tier: "db-n1-standard-1", + * deletionProtectionEnabled: false, + * }, + * deletionProtection: false, + * }); + * const existing_mysql = new gcp.databasemigrationservice.ConnectionProfile("existing-mysql", { + * location: "us-central1", + * connectionProfileId: "destination-cp", + * displayName: "destination-cp_display", + * labels: { + * foo: "bar", + * }, + * mysql: { + * cloudSqlId: "destination-csql", + * }, + * }, { + * dependsOn: [destinationCsql], + * }); + * ``` + * ### Database Migration Service Connection Profile Existing Postgres + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const project = gcp.organizations.getProject({}); + * const destinationCsql = new gcp.sql.DatabaseInstance("destination_csql", { + * name: "destination-csql", + * databaseVersion: "POSTGRES_15", + * settings: { + * tier: "db-custom-2-13312", + * deletionProtectionEnabled: false, + * }, + * deletionProtection: false, + * }); + * const existing_psql = new gcp.databasemigrationservice.ConnectionProfile("existing-psql", { + * location: "us-central1", + * connectionProfileId: "destination-cp", + * displayName: "destination-cp_display", + * labels: { + * foo: "bar", + * }, + * postgresql: { + * cloudSqlId: "destination-csql", + * }, + * }, { + * dependsOn: [destinationCsql], + * }); + * ``` + * ### Database Migration Service Connection Profile Existing Alloydb + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const project = gcp.organizations.getProject({}); + * const _default = new gcp.compute.Network("default", {name: "destination-alloydb"}); + * const destinationAlloydb = new gcp.alloydb.Cluster("destination_alloydb", { + * clusterId: "destination-alloydb", + * location: "us-central1", + * networkConfig: { + * network: _default.id, + * }, + * databaseVersion: "POSTGRES_15", + * initialUser: { + * user: "destination-alloydb", + * password: "destination-alloydb", + * }, + * }); + * const privateIpAlloc = new gcp.compute.GlobalAddress("private_ip_alloc", { + * name: "destination-alloydb", + * addressType: "INTERNAL", + * purpose: "VPC_PEERING", + * prefixLength: 16, + * network: _default.id, + * }); + * const vpcConnection = new gcp.servicenetworking.Connection("vpc_connection", { + * network: _default.id, + * service: "servicenetworking.googleapis.com", + * reservedPeeringRanges: [privateIpAlloc.name], + * }); + * const destinationAlloydbPrimary = new gcp.alloydb.Instance("destination_alloydb_primary", { + * cluster: destinationAlloydb.name, + * instanceId: "destination-alloydb-primary", + * instanceType: "PRIMARY", 
+ * }, { + * dependsOn: [vpcConnection], + * }); + * const existing_alloydb = new gcp.databasemigrationservice.ConnectionProfile("existing-alloydb", { + * location: "us-central1", + * connectionProfileId: "destination-cp", + * displayName: "destination-cp_display", + * labels: { + * foo: "bar", + * }, + * postgresql: { + * alloydbClusterId: "destination-alloydb", + * }, + * }, { + * dependsOn: [ + * destinationAlloydb, + * destinationAlloydbPrimary, + * ], + * }); + * ``` * * ## Import * diff --git a/sdk/nodejs/datastream/stream.ts b/sdk/nodejs/datastream/stream.ts index 7293d1a754..af4b1f9bb6 100644 --- a/sdk/nodejs/datastream/stream.ts +++ b/sdk/nodejs/datastream/stream.ts @@ -450,6 +450,103 @@ import * as utilities from "../utilities"; * }], * }], * }, + * transactionLogs: {}, + * }, + * }, + * destinationConfig: { + * destinationConnectionProfile: destination.id, + * bigqueryDestinationConfig: { + * dataFreshness: "900s", + * sourceHierarchyDatasets: { + * datasetTemplate: { + * location: "us-central1", + * }, + * }, + * }, + * }, + * backfillNone: {}, + * }); + * ``` + * ### Datastream Stream Sql Server Change Tables + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const instance = new gcp.sql.DatabaseInstance("instance", { + * name: "sql-server", + * databaseVersion: "SQLSERVER_2019_STANDARD", + * region: "us-central1", + * rootPassword: "root-password", + * deletionProtection: true, + * settings: { + * tier: "db-custom-2-4096", + * ipConfiguration: { + * authorizedNetworks: [ + * { + * value: "34.71.242.81", + * }, + * { + * value: "34.72.28.29", + * }, + * { + * value: "34.67.6.157", + * }, + * { + * value: "34.67.234.134", + * }, + * { + * value: "34.72.239.218", + * }, + * ], + * }, + * }, + * }); + * const user = new gcp.sql.User("user", { + * name: "user", + * instance: instance.name, + * password: "password", + * }); + * const db = new gcp.sql.Database("db", { + * name: "db", + * instance: instance.name, + * }, { + * dependsOn: [user], + * }); + * const source = new gcp.datastream.ConnectionProfile("source", { + * displayName: "SQL Server Source", + * location: "us-central1", + * connectionProfileId: "source-profile", + * sqlServerProfile: { + * hostname: instance.publicIpAddress, + * port: 1433, + * username: user.name, + * password: user.password, + * database: db.name, + * }, + * }); + * const destination = new gcp.datastream.ConnectionProfile("destination", { + * displayName: "BigQuery Destination", + * location: "us-central1", + * connectionProfileId: "destination-profile", + * bigqueryProfile: {}, + * }); + * const _default = new gcp.datastream.Stream("default", { + * displayName: "SQL Server to BigQuery", + * location: "us-central1", + * streamId: "stream", + * sourceConfig: { + * sourceConnectionProfile: source.id, + * sqlServerSourceConfig: { + * includeObjects: { + * schemas: [{ + * schema: "schema", + * tables: [{ + * table: "table", + * }], + * }], + * }, + * changeTables: {}, * }, * }, * destinationConfig: { @@ -824,7 +921,8 @@ export class Stream extends pulumi.CustomResource { */ public readonly customerManagedEncryptionKey!: pulumi.Output; /** - * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + * values: NOT_STARTED, RUNNING, PAUSED. 
Default: NOT_STARTED */ public readonly desiredState!: pulumi.Output; /** @@ -965,7 +1063,8 @@ export interface StreamState { */ customerManagedEncryptionKey?: pulumi.Input; /** - * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED */ desiredState?: pulumi.Input; /** @@ -1037,7 +1136,8 @@ export interface StreamArgs { */ customerManagedEncryptionKey?: pulumi.Input; /** - * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + * Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + * values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED */ desiredState?: pulumi.Input; /** diff --git a/sdk/nodejs/discoveryengine/dataStore.ts b/sdk/nodejs/discoveryengine/dataStore.ts index 01ef53fc12..d8faa13e82 100644 --- a/sdk/nodejs/discoveryengine/dataStore.ts +++ b/sdk/nodejs/discoveryengine/dataStore.ts @@ -154,7 +154,7 @@ export class DataStore extends pulumi.CustomResource { public readonly documentProcessingConfig!: pulumi.Output; /** * The industry vertical that the data store registers. - * Possible values are: `GENERIC`, `MEDIA`. + * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. */ public readonly industryVertical!: pulumi.Output; /** @@ -186,7 +186,7 @@ export class DataStore extends pulumi.CustomResource { public readonly skipDefaultSchemaCreation!: pulumi.Output; /** * The solutions that the data store enrolls. - * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. */ public readonly solutionTypes!: pulumi.Output; @@ -294,7 +294,7 @@ export interface DataStoreState { documentProcessingConfig?: pulumi.Input; /** * The industry vertical that the data store registers. - * Possible values are: `GENERIC`, `MEDIA`. + * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. */ industryVertical?: pulumi.Input; /** @@ -326,7 +326,7 @@ export interface DataStoreState { skipDefaultSchemaCreation?: pulumi.Input; /** * The solutions that the data store enrolls. - * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. */ solutionTypes?: pulumi.Input[]>; } @@ -365,7 +365,7 @@ export interface DataStoreArgs { documentProcessingConfig?: pulumi.Input; /** * The industry vertical that the data store registers. - * Possible values are: `GENERIC`, `MEDIA`. + * Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. */ industryVertical: pulumi.Input; /** @@ -390,7 +390,7 @@ export interface DataStoreArgs { skipDefaultSchemaCreation?: pulumi.Input; /** * The solutions that the data store enrolls. - * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + * Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. 
*/ solutionTypes?: pulumi.Input[]>; } diff --git a/sdk/nodejs/firebase/databaseInstance.ts b/sdk/nodejs/firebase/databaseInstance.ts index b3948baa62..cc08625440 100644 --- a/sdk/nodejs/firebase/databaseInstance.ts +++ b/sdk/nodejs/firebase/databaseInstance.ts @@ -127,7 +127,7 @@ export class DatabaseInstance extends pulumi.CustomResource { */ public /*out*/ readonly databaseUrl!: pulumi.Output; /** - * The intended database state. + * The intended database state. Possible values: ACTIVE, DISABLED. */ public readonly desiredState!: pulumi.Output; /** @@ -222,7 +222,7 @@ export interface DatabaseInstanceState { */ databaseUrl?: pulumi.Input; /** - * The intended database state. + * The intended database state. Possible values: ACTIVE, DISABLED. */ desiredState?: pulumi.Input; /** @@ -270,7 +270,7 @@ export interface DatabaseInstanceState { */ export interface DatabaseInstanceArgs { /** - * The intended database state. + * The intended database state. Possible values: ACTIVE, DISABLED. */ desiredState?: pulumi.Input; /** diff --git a/sdk/nodejs/gkehub/featureMembership.ts b/sdk/nodejs/gkehub/featureMembership.ts index f39ee96de5..35a87f0017 100644 --- a/sdk/nodejs/gkehub/featureMembership.ts +++ b/sdk/nodejs/gkehub/featureMembership.ts @@ -42,8 +42,9 @@ import * as utilities from "../utilities"; * feature: feature.name, * membership: membership.membershipId, * configmanagement: { - * version: "1.6.2", + * version: "1.19.0", * configSync: { + * enabled: true, * git: { * syncRepo: "https://github.com/hashicorp/terraform", * }, @@ -82,8 +83,9 @@ import * as utilities from "../utilities"; * feature: feature.name, * membership: membership.membershipId, * configmanagement: { - * version: "1.15.1", + * version: "1.19.0", * configSync: { + * enabled: true, * oci: { * syncRepo: "us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest", * policyDir: "config-connector", @@ -177,8 +179,9 @@ import * as utilities from "../utilities"; * membership: membership.membershipId, * membershipLocation: membership.location, * configmanagement: { - * version: "1.6.2", + * version: "1.19.0", * configSync: { + * enabled: true, * git: { * syncRepo: "https://github.com/hashicorp/terraform", * }, diff --git a/sdk/nodejs/iam/getWorkloadIdentityPoolProvider.ts b/sdk/nodejs/iam/getWorkloadIdentityPoolProvider.ts index b2c23a5c3a..d5e5718abe 100644 --- a/sdk/nodejs/iam/getWorkloadIdentityPoolProvider.ts +++ b/sdk/nodejs/iam/getWorkloadIdentityPoolProvider.ts @@ -75,6 +75,7 @@ export interface GetWorkloadIdentityPoolProviderResult { readonly state: string; readonly workloadIdentityPoolId: string; readonly workloadIdentityPoolProviderId: string; + readonly x509s: outputs.iam.GetWorkloadIdentityPoolProviderX509[]; } /** * Get a IAM workload identity provider from Google Cloud by its id. 
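The `DataStore` changes above only widen the documented enum sets (`industryVertical` gains `HEALTHCARE_FHIR`, `solutionTypes` gains `SOLUTION_TYPE_GENERATIVE_CHAT`) and ship without an example in this section, so here is a minimal sketch. It is an illustration only: the ids, display name, and `contentConfig` value are placeholders, and whether a given vertical/solution combination is accepted is decided by the Discovery Engine API.

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as gcp from "@pulumi/gcp";

// Placeholder ids and names; HEALTHCARE_FHIR is one of the newly documented
// industryVertical values in this upgrade.
const healthcareStore = new gcp.discoveryengine.DataStore("healthcare_store", {
    location: "global",
    dataStoreId: "healthcare-data-store-id",
    displayName: "healthcare-fhir-store",
    industryVertical: "HEALTHCARE_FHIR",
    contentConfig: "NO_CONTENT",
    // SOLUTION_TYPE_GENERATIVE_CHAT is now also listed as an accepted value.
    solutionTypes: ["SOLUTION_TYPE_SEARCH"],
});
```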
diff --git a/sdk/nodejs/iam/workloadIdentityPoolProvider.ts b/sdk/nodejs/iam/workloadIdentityPoolProvider.ts index 7490e182da..ba68a47d52 100644 --- a/sdk/nodejs/iam/workloadIdentityPoolProvider.ts +++ b/sdk/nodejs/iam/workloadIdentityPoolProvider.ts @@ -188,6 +188,64 @@ import * as utilities from "../utilities"; * }, * }); * ``` + * ### Iam Workload Identity Pool Provider X509 Basic + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * import * as std from "@pulumi/std"; + * + * const pool = new gcp.iam.WorkloadIdentityPool("pool", {workloadIdentityPoolId: "example-pool"}); + * const example = new gcp.iam.WorkloadIdentityPoolProvider("example", { + * workloadIdentityPoolId: pool.workloadIdentityPoolId, + * workloadIdentityPoolProviderId: "example-prvdr", + * attributeMapping: { + * "google.subject": "assertion.subject.dn.cn", + * }, + * x509: { + * trustStore: { + * trustAnchors: [{ + * pemCertificate: std.file({ + * input: "test-fixtures/trust_anchor.pem", + * }).then(invoke => invoke.result), + * }], + * }, + * }, + * }); + * ``` + * ### Iam Workload Identity Pool Provider X509 Full + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * import * as std from "@pulumi/std"; + * + * const pool = new gcp.iam.WorkloadIdentityPool("pool", {workloadIdentityPoolId: "example-pool"}); + * const example = new gcp.iam.WorkloadIdentityPoolProvider("example", { + * workloadIdentityPoolId: pool.workloadIdentityPoolId, + * workloadIdentityPoolProviderId: "example-prvdr", + * displayName: "Name of provider", + * description: "X.509 identity pool provider for automated test", + * disabled: true, + * attributeMapping: { + * "google.subject": "assertion.subject.dn.cn", + * }, + * x509: { + * trustStore: { + * trustAnchors: [{ + * pemCertificate: std.file({ + * input: "test-fixtures/trust_anchor.pem", + * }).then(invoke => invoke.result), + * }], + * intermediateCas: [{ + * pemCertificate: std.file({ + * input: "test-fixtures/intermediate_ca.pem", + * }).then(invoke => invoke.result), + * }], + * }, + * }, + * }); + * ``` * * ## Import * @@ -366,6 +424,12 @@ export class WorkloadIdentityPoolProvider extends pulumi.CustomResource { * - - - */ public readonly workloadIdentityPoolProviderId!: pulumi.Output; + /** + * An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + */ + public readonly x509!: pulumi.Output; /** * Create a WorkloadIdentityPoolProvider resource with the given unique name, arguments, and options. @@ -393,6 +457,7 @@ export class WorkloadIdentityPoolProvider extends pulumi.CustomResource { resourceInputs["state"] = state ? state.state : undefined; resourceInputs["workloadIdentityPoolId"] = state ? state.workloadIdentityPoolId : undefined; resourceInputs["workloadIdentityPoolProviderId"] = state ? state.workloadIdentityPoolProviderId : undefined; + resourceInputs["x509"] = state ? state.x509 : undefined; } else { const args = argsOrState as WorkloadIdentityPoolProviderArgs | undefined; if ((!args || args.workloadIdentityPoolId === undefined) && !opts.urn) { @@ -412,6 +477,7 @@ export class WorkloadIdentityPoolProvider extends pulumi.CustomResource { resourceInputs["saml"] = args ? args.saml : undefined; resourceInputs["workloadIdentityPoolId"] = args ? 
args.workloadIdentityPoolId : undefined; resourceInputs["workloadIdentityPoolProviderId"] = args ? args.workloadIdentityPoolProviderId : undefined; + resourceInputs["x509"] = args ? args.x509 : undefined; resourceInputs["name"] = undefined /*out*/; resourceInputs["state"] = undefined /*out*/; } @@ -549,6 +615,12 @@ export interface WorkloadIdentityPoolProviderState { * - - - */ workloadIdentityPoolProviderId?: pulumi.Input; + /** + * An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + */ + x509?: pulumi.Input; } /** @@ -665,4 +737,10 @@ export interface WorkloadIdentityPoolProviderArgs { * - - - */ workloadIdentityPoolProviderId: pulumi.Input; + /** + * An X.509-type identity provider represents a CA. It is trusted to assert a + * client identity if the client has a certificate that chains up to this CA. + * Structure is documented below. + */ + x509?: pulumi.Input; } diff --git a/sdk/nodejs/kms/autokeyConfig.ts b/sdk/nodejs/kms/autokeyConfig.ts index 4c0d3c65e0..a4ae0ff9d9 100644 --- a/sdk/nodejs/kms/autokeyConfig.ts +++ b/sdk/nodejs/kms/autokeyConfig.ts @@ -67,11 +67,16 @@ import * as utilities from "../utilities"; * dependsOn: [autokeyProjectAdmin], * }); * const example_autokeyconfig = new gcp.kms.AutokeyConfig("example-autokeyconfig", { - * folder: autokmsFolder.folderId, + * folder: autokmsFolder.id, * keyProject: pulumi.interpolate`projects/${keyProject.projectId}`, * }, { * dependsOn: [waitSrvAccPermissions], * }); + * // Wait delay after setting AutokeyConfig, to prevent diffs on reapply, + * // because setting the config takes a little to fully propagate. + * const waitAutokeyPropagation = new time.index.Sleep("wait_autokey_propagation", {createDuration: "30s"}, { + * dependsOn: [example_autokeyconfig], + * }); * ``` * * ## Import diff --git a/sdk/nodejs/kms/getCryptoKeyLatestVersion.ts b/sdk/nodejs/kms/getCryptoKeyLatestVersion.ts new file mode 100644 index 0000000000..7919d74560 --- /dev/null +++ b/sdk/nodejs/kms/getCryptoKeyLatestVersion.ts @@ -0,0 +1,141 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../types/input"; +import * as outputs from "../types/output"; +import * as utilities from "../utilities"; + +/** + * Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). 
+ * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const myKeyRing = gcp.kms.getKMSKeyRing({ + * name: "my-key-ring", + * location: "us-central1", + * }); + * const myCryptoKey = myKeyRing.then(myKeyRing => gcp.kms.getKMSCryptoKey({ + * name: "my-crypto-key", + * keyRing: myKeyRing.id, + * })); + * const myCryptoKeyLatestVersion = gcp.kms.getCryptoKeyLatestVersion({ + * cryptoKey: myKey.id, + * }); + * ``` + */ +export function getCryptoKeyLatestVersion(args: GetCryptoKeyLatestVersionArgs, opts?: pulumi.InvokeOptions): Promise { + + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {}); + return pulumi.runtime.invoke("gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion", { + "cryptoKey": args.cryptoKey, + "filter": args.filter, + }, opts); +} + +/** + * A collection of arguments for invoking getCryptoKeyLatestVersion. + */ +export interface GetCryptoKeyLatestVersionArgs { + /** + * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + */ + cryptoKey: string; + /** + * The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on state. + * + * * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + */ + filter?: string; +} + +/** + * A collection of values returned by getCryptoKeyLatestVersion. + */ +export interface GetCryptoKeyLatestVersionResult { + /** + * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + */ + readonly algorithm: string; + readonly cryptoKey: string; + readonly filter?: string; + /** + * The provider-assigned unique ID for this managed resource. + */ + readonly id: string; + readonly name: string; + /** + * The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protectionLevel reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs. + */ + readonly protectionLevel: string; + /** + * If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below. + */ + readonly publicKeys: outputs.kms.GetCryptoKeyLatestVersionPublicKey[]; + /** + * The current state of the latest CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs. + */ + readonly state: string; + readonly version: number; +} +/** + * Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). 
+ * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const myKeyRing = gcp.kms.getKMSKeyRing({ + * name: "my-key-ring", + * location: "us-central1", + * }); + * const myCryptoKey = myKeyRing.then(myKeyRing => gcp.kms.getKMSCryptoKey({ + * name: "my-crypto-key", + * keyRing: myKeyRing.id, + * })); + * const myCryptoKeyLatestVersion = gcp.kms.getCryptoKeyLatestVersion({ + * cryptoKey: myKey.id, + * }); + * ``` + */ +export function getCryptoKeyLatestVersionOutput(args: GetCryptoKeyLatestVersionOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { + return pulumi.output(args).apply((a: any) => getCryptoKeyLatestVersion(a, opts)) +} + +/** + * A collection of arguments for invoking getCryptoKeyLatestVersion. + */ +export interface GetCryptoKeyLatestVersionOutputArgs { + /** + * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + */ + cryptoKey: pulumi.Input; + /** + * The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on state. + * + * * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + */ + filter?: pulumi.Input; +} diff --git a/sdk/nodejs/kms/getCryptoKeyVersions.ts b/sdk/nodejs/kms/getCryptoKeyVersions.ts new file mode 100644 index 0000000000..d4092fe1aa --- /dev/null +++ b/sdk/nodejs/kms/getCryptoKeyVersions.ts @@ -0,0 +1,130 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as inputs from "../types/input"; +import * as outputs from "../types/output"; +import * as utilities from "../utilities"; + +/** + * Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const myKeyRing = gcp.kms.getKMSKeyRing({ + * name: "my-key-ring", + * location: "us-central1", + * }); + * const myCryptoKey = myKeyRing.then(myKeyRing => gcp.kms.getKMSCryptoKey({ + * name: "my-crypto-key", + * keyRing: myKeyRing.id, + * })); + * const myCryptoKeyVersions = gcp.kms.getCryptoKeyVersions({ + * cryptoKey: myKey.id, + * }); + * ``` + */ +export function getCryptoKeyVersions(args: GetCryptoKeyVersionsArgs, opts?: pulumi.InvokeOptions): Promise { + + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {}); + return pulumi.runtime.invoke("gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions", { + "cryptoKey": args.cryptoKey, + "filter": args.filter, + }, opts); +} + +/** + * A collection of arguments for invoking getCryptoKeyVersions. + */ +export interface GetCryptoKeyVersionsArgs { + /** + * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. 
This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + */ + cryptoKey: string; + /** + * The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + * + * * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + */ + filter?: string; +} + +/** + * A collection of values returned by getCryptoKeyVersions. + */ +export interface GetCryptoKeyVersionsResult { + readonly cryptoKey: string; + readonly filter?: string; + /** + * The provider-assigned unique ID for this managed resource. + */ + readonly id: string; + readonly publicKeys: outputs.kms.GetCryptoKeyVersionsPublicKey[]; + /** + * A list of all the retrieved crypto key versions from the provided crypto key. This list is influenced by the provided filter argument. + */ + readonly versions: outputs.kms.GetCryptoKeyVersionsVersion[]; +} +/** + * Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see + * [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + * and + * [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const myKeyRing = gcp.kms.getKMSKeyRing({ + * name: "my-key-ring", + * location: "us-central1", + * }); + * const myCryptoKey = myKeyRing.then(myKeyRing => gcp.kms.getKMSCryptoKey({ + * name: "my-crypto-key", + * keyRing: myKeyRing.id, + * })); + * const myCryptoKeyVersions = gcp.kms.getCryptoKeyVersions({ + * cryptoKey: myKey.id, + * }); + * ``` + */ +export function getCryptoKeyVersionsOutput(args: GetCryptoKeyVersionsOutputArgs, opts?: pulumi.InvokeOptions): pulumi.Output { + return pulumi.output(args).apply((a: any) => getCryptoKeyVersions(a, opts)) +} + +/** + * A collection of arguments for invoking getCryptoKeyVersions. + */ +export interface GetCryptoKeyVersionsOutputArgs { + /** + * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + */ + cryptoKey: pulumi.Input; + /** + * The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + * + * Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + * + * * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. 
+ * + * [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + */ + filter?: pulumi.Input; +} diff --git a/sdk/nodejs/kms/index.ts b/sdk/nodejs/kms/index.ts index 9be092ff5a..8a20c6a5df 100644 --- a/sdk/nodejs/kms/index.ts +++ b/sdk/nodejs/kms/index.ts @@ -60,6 +60,16 @@ export const getCryptoKeyIamPolicy: typeof import("./getCryptoKeyIamPolicy").get export const getCryptoKeyIamPolicyOutput: typeof import("./getCryptoKeyIamPolicy").getCryptoKeyIamPolicyOutput = null as any; utilities.lazyLoad(exports, ["getCryptoKeyIamPolicy","getCryptoKeyIamPolicyOutput"], () => require("./getCryptoKeyIamPolicy")); +export { GetCryptoKeyLatestVersionArgs, GetCryptoKeyLatestVersionResult, GetCryptoKeyLatestVersionOutputArgs } from "./getCryptoKeyLatestVersion"; +export const getCryptoKeyLatestVersion: typeof import("./getCryptoKeyLatestVersion").getCryptoKeyLatestVersion = null as any; +export const getCryptoKeyLatestVersionOutput: typeof import("./getCryptoKeyLatestVersion").getCryptoKeyLatestVersionOutput = null as any; +utilities.lazyLoad(exports, ["getCryptoKeyLatestVersion","getCryptoKeyLatestVersionOutput"], () => require("./getCryptoKeyLatestVersion")); + +export { GetCryptoKeyVersionsArgs, GetCryptoKeyVersionsResult, GetCryptoKeyVersionsOutputArgs } from "./getCryptoKeyVersions"; +export const getCryptoKeyVersions: typeof import("./getCryptoKeyVersions").getCryptoKeyVersions = null as any; +export const getCryptoKeyVersionsOutput: typeof import("./getCryptoKeyVersions").getCryptoKeyVersionsOutput = null as any; +utilities.lazyLoad(exports, ["getCryptoKeyVersions","getCryptoKeyVersionsOutput"], () => require("./getCryptoKeyVersions")); + export { GetCryptoKeysArgs, GetCryptoKeysResult, GetCryptoKeysOutputArgs } from "./getCryptoKeys"; export const getCryptoKeys: typeof import("./getCryptoKeys").getCryptoKeys = null as any; export const getCryptoKeysOutput: typeof import("./getCryptoKeys").getCryptoKeysOutput = null as any; diff --git a/sdk/nodejs/netapp/activeDirectory.ts b/sdk/nodejs/netapp/activeDirectory.ts index 29d81cd322..a281eca4f9 100644 --- a/sdk/nodejs/netapp/activeDirectory.ts +++ b/sdk/nodejs/netapp/activeDirectory.ts @@ -7,7 +7,7 @@ import * as utilities from "../utilities"; /** * ActiveDirectory is the public representation of the active directory config. * - * To get more information about activeDirectory, see: + * To get more information about ActiveDirectory, see: * * * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.activeDirectories) * * How-to Guides @@ -58,7 +58,7 @@ import * as utilities from "../utilities"; * * ## Import * - * activeDirectory can be imported using any of these accepted formats: + * ActiveDirectory can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}` * @@ -66,7 +66,7 @@ import * as utilities from "../utilities"; * * * `{{location}}/{{name}}` * - * When using the `pulumi import` command, activeDirectory can be imported using one of the formats above. For example: + * When using the `pulumi import` command, ActiveDirectory can be imported using one of the formats above. 
For example: * * ```sh * $ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default projects/{{project}}/locations/{{location}}/activeDirectories/{{name}} diff --git a/sdk/nodejs/netapp/backup.ts b/sdk/nodejs/netapp/backup.ts index b55251d9bb..294fbbdf35 100644 --- a/sdk/nodejs/netapp/backup.ts +++ b/sdk/nodejs/netapp/backup.ts @@ -20,7 +20,7 @@ import * as utilities from "../utilities"; * from a volume or from an existing volume snapshot. Scheduled backups * require a backup policy. * - * To get more information about backup, see: + * To get more information about Backup, see: * * * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults.backups) * * How-to Guides @@ -70,7 +70,7 @@ import * as utilities from "../utilities"; * * ## Import * - * backup can be imported using any of these accepted formats: + * Backup can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}` * @@ -78,7 +78,7 @@ import * as utilities from "../utilities"; * * * `{{location}}/{{vault_name}}/{{name}}` * - * When using the `pulumi import` command, backup can be imported using one of the formats above. For example: + * When using the `pulumi import` command, Backup can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:netapp/backup:Backup default projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}} diff --git a/sdk/nodejs/netapp/backupPolicy.ts b/sdk/nodejs/netapp/backupPolicy.ts index c7eb1018f6..c5aaa3ca31 100644 --- a/sdk/nodejs/netapp/backupPolicy.ts +++ b/sdk/nodejs/netapp/backupPolicy.ts @@ -9,7 +9,7 @@ import * as utilities from "../utilities"; * Backup policies allow you to attach a backup schedule to a volume. * The policy defines how many backups to retain at daily, weekly, or monthly intervals. * - * To get more information about backupPolicy, see: + * To get more information about BackupPolicy, see: * * * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupPolicies) * * How-to Guides @@ -39,7 +39,7 @@ import * as utilities from "../utilities"; * * ## Import * - * backupPolicy can be imported using any of these accepted formats: + * BackupPolicy can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}` * @@ -47,7 +47,7 @@ import * as utilities from "../utilities"; * * * `{{location}}/{{name}}` * - * When using the `pulumi import` command, backupPolicy can be imported using one of the formats above. For example: + * When using the `pulumi import` command, BackupPolicy can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:netapp/backupPolicy:BackupPolicy default projects/{{project}}/locations/{{location}}/backupPolicies/{{name}} diff --git a/sdk/nodejs/netapp/backupVault.ts b/sdk/nodejs/netapp/backupVault.ts index da6cde49ae..7a579480e6 100644 --- a/sdk/nodejs/netapp/backupVault.ts +++ b/sdk/nodejs/netapp/backupVault.ts @@ -8,7 +8,7 @@ import * as utilities from "../utilities"; * A backup vault is the location where backups are stored. You can only create one backup vault per region. * A vault can hold multiple backups for multiple volumes in that region. 
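 *
 * For orientation, a minimal vault might look like the following (a sketch; the
 * name, location, and description are placeholders):
 *
 * ```typescript
 * import * as gcp from "@pulumi/gcp";
 *
 * // Only one vault is allowed per region, so the location effectively identifies it.
 * const vault = new gcp.netapp.BackupVault("test-vault", {
 *     name: "test-vault",
 *     location: "us-central1",
 *     description: "Vault for scheduled volume backups",
 * });
 * ```
 *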
* - * To get more information about backupVault, see: + * To get more information about BackupVault, see: * * * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults) * * How-to Guides @@ -34,7 +34,7 @@ import * as utilities from "../utilities"; * * ## Import * - * backupVault can be imported using any of these accepted formats: + * BackupVault can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/backupVaults/{{name}}` * @@ -42,7 +42,7 @@ import * as utilities from "../utilities"; * * * `{{location}}/{{name}}` * - * When using the `pulumi import` command, backupVault can be imported using one of the formats above. For example: + * When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:netapp/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{name}} diff --git a/sdk/nodejs/netapp/storagePool.ts b/sdk/nodejs/netapp/storagePool.ts index 4562d62cf0..43108fac70 100644 --- a/sdk/nodejs/netapp/storagePool.ts +++ b/sdk/nodejs/netapp/storagePool.ts @@ -52,7 +52,7 @@ import * as utilities from "../utilities"; * * ## Import * - * storagePool can be imported using any of these accepted formats: + * StoragePool can be imported using any of these accepted formats: * * * `projects/{{project}}/locations/{{location}}/storagePools/{{name}}` * @@ -60,7 +60,7 @@ import * as utilities from "../utilities"; * * * `{{location}}/{{name}}` * - * When using the `pulumi import` command, storagePool can be imported using one of the formats above. For example: + * When using the `pulumi import` command, StoragePool can be imported using one of the formats above. For example: * * ```sh * $ pulumi import gcp:netapp/storagePool:StoragePool default projects/{{project}}/locations/{{location}}/storagePools/{{name}} diff --git a/sdk/nodejs/netapp/volume.ts b/sdk/nodejs/netapp/volume.ts index ef2304479c..a5120e83f1 100644 --- a/sdk/nodejs/netapp/volume.ts +++ b/sdk/nodejs/netapp/volume.ts @@ -122,6 +122,7 @@ export class Volume extends pulumi.CustomResource { * Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. */ public readonly deletionPolicy!: pulumi.Output; /** @@ -408,6 +409,7 @@ export interface VolumeState { * Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. */ deletionPolicy?: pulumi.Input; /** @@ -575,6 +577,7 @@ export interface VolumeArgs { * Policy to determine if the volume should be deleted forcefully. * Volumes may have nested snapshot resources. Deleting such a volume will fail. * Setting this parameter to FORCE will delete volumes including nested snapshots. + * Possible values: DEFAULT, FORCE. 
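 *
 * A sketch of opting into forced deletion, assuming an existing storage pool
 * named `test-pool` (all other values are placeholders):
 *
 * ```typescript
 * import * as gcp from "@pulumi/gcp";
 *
 * const volume = new gcp.netapp.Volume("test-volume", {
 *     name: "test-volume",
 *     location: "us-central1",
 *     storagePool: "test-pool",
 *     capacityGib: "1024",
 *     shareName: "share1",
 *     protocols: ["NFSV3"],
 *     // FORCE removes nested snapshots along with the volume; DEFAULT (or
 *     // omitting the field) makes such a delete fail instead.
 *     deletionPolicy: "FORCE",
 * });
 * ```
 *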
*/ deletionPolicy?: pulumi.Input; /** diff --git a/sdk/nodejs/networkconnectivity/spoke.ts b/sdk/nodejs/networkconnectivity/spoke.ts index d95db861fe..b9a54e2ab9 100644 --- a/sdk/nodejs/networkconnectivity/spoke.ts +++ b/sdk/nodejs/networkconnectivity/spoke.ts @@ -47,6 +47,10 @@ import * as utilities from "../utilities"; * "198.51.100.0/24", * "10.10.0.0/16", * ], + * includeExportRanges: [ + * "198.51.100.0/23", + * "10.0.0.0/8", + * ], * uri: network.selfLink, * }, * }); diff --git a/sdk/nodejs/networksecurity/clientTlsPolicy.ts b/sdk/nodejs/networksecurity/clientTlsPolicy.ts index f30a46e26c..c157837ada 100644 --- a/sdk/nodejs/networksecurity/clientTlsPolicy.ts +++ b/sdk/nodejs/networksecurity/clientTlsPolicy.ts @@ -7,6 +7,14 @@ import * as outputs from "../types/output"; import * as utilities from "../utilities"; /** + * ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. + * + * To get more information about ClientTlsPolicy, see: + * + * * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.clientTlsPolicies) + * * How-to Guides + * * [Service Security](https://cloud.google.com/traffic-director/docs/security-use-cases) + * * ## Example Usage * * ### Network Security Client Tls Policy Basic @@ -41,18 +49,11 @@ import * as utilities from "../utilities"; * pluginInstance: "google_cloud_private_spiffe", * }, * }, - * serverValidationCas: [ - * { - * grpcEndpoint: { - * targetUri: "unix:mypath", - * }, - * }, - * { - * grpcEndpoint: { - * targetUri: "unix:mypath1", - * }, + * serverValidationCas: [{ + * grpcEndpoint: { + * targetUri: "unix:mypath", * }, - * ], + * }], * }); * ``` * diff --git a/sdk/nodejs/networksecurity/serverTlsPolicy.ts b/sdk/nodejs/networksecurity/serverTlsPolicy.ts index 0059c69972..582cbba157 100644 --- a/sdk/nodejs/networksecurity/serverTlsPolicy.ts +++ b/sdk/nodejs/networksecurity/serverTlsPolicy.ts @@ -7,6 +7,12 @@ import * as outputs from "../types/output"; import * as utilities from "../utilities"; /** + * ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. 
+ * + * To get more information about ServerTlsPolicy, see: + * + * * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.serverTlsPolicies) + * * ## Example Usage * * ### Network Security Server Tls Policy Basic @@ -28,23 +34,11 @@ import * as utilities from "../utilities"; * }, * }, * mtlsPolicy: { - * clientValidationCas: [ - * { - * grpcEndpoint: { - * targetUri: "unix:mypath", - * }, + * clientValidationCas: [{ + * grpcEndpoint: { + * targetUri: "unix:mypath", * }, - * { - * grpcEndpoint: { - * targetUri: "unix:abc/mypath", - * }, - * }, - * { - * certificateProviderInstance: { - * pluginInstance: "google_cloud_private_spiffe", - * }, - * }, - * ], + * }], * }, * }); * ``` diff --git a/sdk/nodejs/organizations/getProject.ts b/sdk/nodejs/organizations/getProject.ts index 5fc781c095..903542aa30 100644 --- a/sdk/nodejs/organizations/getProject.ts +++ b/sdk/nodejs/organizations/getProject.ts @@ -60,6 +60,7 @@ export interface GetProjectResult { readonly orgId: string; readonly projectId?: string; readonly pulumiLabels: {[key: string]: string}; + readonly tags: {[key: string]: string}; } /** * Use this data source to get project details. diff --git a/sdk/nodejs/organizations/project.ts b/sdk/nodejs/organizations/project.ts index 1aa0e32886..8504c4c505 100644 --- a/sdk/nodejs/organizations/project.ts +++ b/sdk/nodejs/organizations/project.ts @@ -17,6 +17,10 @@ import * as utilities from "../utilities"; * * > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. * + * > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `autoCreateNetwork` to false, when possible. + * + * > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. + * * To get more information about projects, see: * * * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -53,6 +57,22 @@ import * as utilities from "../utilities"; * }); * ``` * + * To create a project with a tag + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const myProject = new gcp.organizations.Project("my_project", { + * name: "My Project", + * projectId: "your-project-id", + * orgId: "1234567", + * tags: { + * "1234567/env": "staging", + * }, + * }); + * ``` + * * ## Import * * Projects can be imported using the `project_id`, e.g. @@ -151,6 +171,10 @@ export class Project extends pulumi.CustomResource { * The combination of labels configured directly on the resource and default labels configured on the provider. */ public /*out*/ readonly pulumiLabels!: pulumi.Output<{[key: string]: string}>; + /** + * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + */ + public readonly tags!: pulumi.Output<{[key: string]: string} | undefined>; /** * Create a Project resource with the given unique name, arguments, and options. 
@@ -176,6 +200,7 @@ export class Project extends pulumi.CustomResource { resourceInputs["orgId"] = state ? state.orgId : undefined; resourceInputs["projectId"] = state ? state.projectId : undefined; resourceInputs["pulumiLabels"] = state ? state.pulumiLabels : undefined; + resourceInputs["tags"] = state ? state.tags : undefined; } else { const args = argsOrState as ProjectArgs | undefined; resourceInputs["autoCreateNetwork"] = args ? args.autoCreateNetwork : undefined; @@ -186,6 +211,7 @@ export class Project extends pulumi.CustomResource { resourceInputs["name"] = args ? args.name : undefined; resourceInputs["orgId"] = args ? args.orgId : undefined; resourceInputs["projectId"] = args ? args.projectId : undefined; + resourceInputs["tags"] = args ? args.tags : undefined; resourceInputs["effectiveLabels"] = undefined /*out*/; resourceInputs["number"] = undefined /*out*/; resourceInputs["pulumiLabels"] = undefined /*out*/; @@ -259,6 +285,10 @@ export interface ProjectState { * The combination of labels configured directly on the resource and default labels configured on the provider. */ pulumiLabels?: pulumi.Input<{[key: string]: pulumi.Input}>; + /** + * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + */ + tags?: pulumi.Input<{[key: string]: pulumi.Input}>; } /** @@ -311,4 +341,8 @@ export interface ProjectArgs { * The project ID. Changing this forces a new project to be created. */ projectId?: pulumi.Input; + /** + * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + */ + tags?: pulumi.Input<{[key: string]: pulumi.Input}>; } diff --git a/sdk/nodejs/parallelstore/instance.ts b/sdk/nodejs/parallelstore/instance.ts index 56f819f24a..829d4e0b3e 100644 --- a/sdk/nodejs/parallelstore/instance.ts +++ b/sdk/nodejs/parallelstore/instance.ts @@ -114,7 +114,7 @@ export class Instance extends pulumi.CustomResource { */ public /*out*/ readonly createTime!: pulumi.Output; /** - * The version of DAOS software running in the instance + * The version of DAOS software running in the instance. */ public /*out*/ readonly daosVersion!: pulumi.Output; /** @@ -137,9 +137,9 @@ export class Instance extends pulumi.CustomResource { */ public /*out*/ readonly effectiveLabels!: pulumi.Output<{[key: string]: string}>; /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. This field is populated by the service and + * Immutable. Contains the id of the allocated IP address + * range associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. This field is populated by the service * and contains the value currently used by the service. 
*/ public /*out*/ readonly effectiveReservedIpRange!: pulumi.Output; @@ -167,12 +167,12 @@ export class Instance extends pulumi.CustomResource { */ public readonly instanceId!: pulumi.Output; /** - * Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -183,8 +183,9 @@ export class Instance extends pulumi.CustomResource { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field `effectiveLabels` for all of the labels present on the resource. */ @@ -199,9 +200,8 @@ export class Instance extends pulumi.CustomResource { */ public /*out*/ readonly name!: pulumi.Output; /** - * Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. */ public readonly network!: pulumi.Output; /** @@ -215,10 +215,10 @@ export class Instance extends pulumi.CustomResource { */ public /*out*/ readonly pulumiLabels!: pulumi.Output<{[key: string]: string}>; /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. */ public readonly reservedIpRange!: pulumi.Output; /** @@ -325,7 +325,7 @@ export interface InstanceState { */ createTime?: pulumi.Input; /** - * The version of DAOS software running in the instance + * The version of DAOS software running in the instance. 
*/ daosVersion?: pulumi.Input; /** @@ -348,9 +348,9 @@ export interface InstanceState { */ effectiveLabels?: pulumi.Input<{[key: string]: pulumi.Input}>; /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. This field is populated by the service and + * Immutable. Contains the id of the allocated IP address + * range associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. This field is populated by the service * and contains the value currently used by the service. */ effectiveReservedIpRange?: pulumi.Input; @@ -378,12 +378,12 @@ export interface InstanceState { */ instanceId?: pulumi.Input; /** - * Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -394,8 +394,9 @@ export interface InstanceState { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field `effectiveLabels` for all of the labels present on the resource. */ @@ -410,9 +411,8 @@ export interface InstanceState { */ name?: pulumi.Input; /** - * Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. */ network?: pulumi.Input; /** @@ -426,10 +426,10 @@ export interface InstanceState { */ pulumiLabels?: pulumi.Input<{[key: string]: pulumi.Input}>; /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * Immutable. 
Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. */ reservedIpRange?: pulumi.Input; /** @@ -496,12 +496,12 @@ export interface InstanceArgs { */ instanceId: pulumi.Input; /** - * Cloud Labels are a flexible and lightweight mechanism for organizing cloud - * resources into groups that reflect a customer's organizational needs and - * deployment strategies. Cloud Labels can be used to filter collections of - * resources. They can be used to control how resource metrics are aggregated. - * And they can be used as arguments to policy management rules (e.g. route, - * firewall, load balancing, etc.). + * Cloud Labels are a flexible and lightweight mechanism for + * organizing cloud resources into groups that reflect a customer's organizational + * needs and deployment strategies. Cloud Labels can be used to filter collections + * of resources. They can be used to control how resource metrics are aggregated. + * And they can be used as arguments to policy management rules (e.g. route, firewall, + * load balancing, etc.). * * Label keys must be between 1 and 63 characters long and must conform to * the following regular expression: `a-z{0,62}`. * * Label values must be between 0 and 63 characters long and must conform @@ -512,8 +512,9 @@ export interface InstanceArgs { * characters may be allowed in the future. Therefore, you are advised to use * an internal label representation, such as JSON, which doesn't rely upon * specific characters being disallowed. For example, representing labels - * as the string: name + "_" + value would prove problematic if we were to - * allow "_" in a future release. + * as the string: `name + "_" + value` would prove problematic if we were to + * allow `"_"` in a future release. " + * * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. * Please refer to the field `effectiveLabels` for all of the labels present on the resource. */ @@ -523,9 +524,8 @@ export interface InstanceArgs { */ location: pulumi.Input; /** - * Immutable. The name of the Google Compute Engine - * [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - * instance is connected. + * Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + * to which the instance is connected. */ network?: pulumi.Input; /** @@ -534,10 +534,10 @@ export interface InstanceArgs { */ project?: pulumi.Input; /** - * Immutable. Contains the id of the allocated IP address range associated with the - * private service access connection for example, "test-default" associated - * with IP range 10.0.0.0/29. If no range id is provided all ranges will be - * considered. + * Immutable. Contains the id of the allocated IP address range + * associated with the private service access connection for example, \"test-default\" + * associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + * be considered. 
*/ reservedIpRange?: pulumi.Input; } diff --git a/sdk/nodejs/projects/iamMemberRemove.ts b/sdk/nodejs/projects/iamMemberRemove.ts index e3429699a9..434c1a75ad 100644 --- a/sdk/nodejs/projects/iamMemberRemove.ts +++ b/sdk/nodejs/projects/iamMemberRemove.ts @@ -24,6 +24,20 @@ import * as utilities from "../utilities"; * [the official documentation](https://cloud.google.com/iam/docs/granting-changing-revoking-access) * and * [API reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). + * + * ## Example Usage + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const targetProject = gcp.organizations.getProject({}); + * const foo = new gcp.projects.IamMemberRemove("foo", { + * role: "roles/editor", + * project: targetProjectGoogleProject.projectId, + * member: `serviceAccount:${targetProjectGoogleProject.number}-compute@developer.gserviceaccount.com`, + * }); + * ``` */ export class IamMemberRemove extends pulumi.CustomResource { /** diff --git a/sdk/nodejs/projects/usageExportBucket.ts b/sdk/nodejs/projects/usageExportBucket.ts index 7e1232eb55..9657d30c88 100644 --- a/sdk/nodejs/projects/usageExportBucket.ts +++ b/sdk/nodejs/projects/usageExportBucket.ts @@ -17,6 +17,10 @@ import * as utilities from "../utilities"; * * > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. * + * > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `autoCreateNetwork` to false, when possible. + * + * > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. + * * To get more information about projects, see: * * * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -53,6 +57,22 @@ import * as utilities from "../utilities"; * }); * ``` * + * To create a project with a tag + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const myProject = new gcp.organizations.Project("my_project", { + * name: "My Project", + * projectId: "your-project-id", + * orgId: "1234567", + * tags: { + * "1234567/env": "staging", + * }, + * }); + * ``` + * * ## Import * * Projects can be imported using the `project_id`, e.g. 
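 *
 * A minimal usage export setup, for orientation (a sketch; the bucket name,
 * prefix, and project id are placeholders):
 *
 * ```typescript
 * import * as gcp from "@pulumi/gcp";
 *
 * // Compute Engine usage reports for the project are written into the
 * // existing GCS bucket under the given prefix.
 * const usageExport = new gcp.projects.UsageExportBucket("usage-export", {
 *     project: "your-project-id",
 *     bucketName: "usage-report-bucket",
 *     prefix: "usage-gce",
 * });
 * ```
 *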
diff --git a/sdk/nodejs/pubsub/subscription.ts b/sdk/nodejs/pubsub/subscription.ts index 1ec6008621..cdc5ecc28d 100644 --- a/sdk/nodejs/pubsub/subscription.ts +++ b/sdk/nodejs/pubsub/subscription.ts @@ -258,6 +258,7 @@ import * as utilities from "../utilities"; * filenameDatetimeFormat: "YYYY-MM-DD/hh_mm_ssZ", * maxBytes: 1000, * maxDuration: "300s", + * maxMessages: 1000, * }, * }, { * dependsOn: [ @@ -294,8 +295,10 @@ import * as utilities from "../utilities"; * filenameDatetimeFormat: "YYYY-MM-DD/hh_mm_ssZ", * maxBytes: 1000, * maxDuration: "300s", + * maxMessages: 1000, * avroConfig: { * writeMetadata: true, + * useTopicSchema: true, * }, * }, * }, { diff --git a/sdk/nodejs/redis/cluster.ts b/sdk/nodejs/redis/cluster.ts index 55d9028074..a83b798c9a 100644 --- a/sdk/nodejs/redis/cluster.ts +++ b/sdk/nodejs/redis/cluster.ts @@ -61,6 +61,17 @@ import * as utilities from "../utilities"; * zoneDistributionConfig: { * mode: "MULTI_ZONE", * }, + * maintenancePolicy: { + * weeklyMaintenanceWindows: [{ + * day: "MONDAY", + * startTime: { + * hours: 1, + * minutes: 0, + * seconds: 0, + * nanos: 0, + * }, + * }], + * }, * }, { * dependsOn: [_default], * }); @@ -102,6 +113,17 @@ import * as utilities from "../utilities"; * mode: "SINGLE_ZONE", * zone: "us-central1-f", * }, + * maintenancePolicy: { + * weeklyMaintenanceWindows: [{ + * day: "MONDAY", + * startTime: { + * hours: 1, + * minutes: 0, + * seconds: 0, + * nanos: 0, + * }, + * }], + * }, * deletionProtectionEnabled: true, * }, { * dependsOn: [_default], @@ -190,6 +212,15 @@ export class Cluster extends pulumi.CustomResource { * Structure is documented below. */ public /*out*/ readonly discoveryEndpoints!: pulumi.Output; + /** + * Maintenance policy for a cluster + */ + public readonly maintenancePolicy!: pulumi.Output; + /** + * Upcoming maintenance schedule. + * Structure is documented below. + */ + public /*out*/ readonly maintenanceSchedules!: pulumi.Output; /** * Unique name of the resource in this scope including project and location using the form: * projects/{projectId}/locations/{locationId}/clusters/{clusterId} @@ -280,6 +311,8 @@ export class Cluster extends pulumi.CustomResource { resourceInputs["createTime"] = state ? state.createTime : undefined; resourceInputs["deletionProtectionEnabled"] = state ? state.deletionProtectionEnabled : undefined; resourceInputs["discoveryEndpoints"] = state ? state.discoveryEndpoints : undefined; + resourceInputs["maintenancePolicy"] = state ? state.maintenancePolicy : undefined; + resourceInputs["maintenanceSchedules"] = state ? state.maintenanceSchedules : undefined; resourceInputs["name"] = state ? state.name : undefined; resourceInputs["nodeType"] = state ? state.nodeType : undefined; resourceInputs["preciseSizeGb"] = state ? state.preciseSizeGb : undefined; @@ -306,6 +339,7 @@ export class Cluster extends pulumi.CustomResource { } resourceInputs["authorizationMode"] = args ? args.authorizationMode : undefined; resourceInputs["deletionProtectionEnabled"] = args ? args.deletionProtectionEnabled : undefined; + resourceInputs["maintenancePolicy"] = args ? args.maintenancePolicy : undefined; resourceInputs["name"] = args ? args.name : undefined; resourceInputs["nodeType"] = args ? args.nodeType : undefined; resourceInputs["project"] = args ? args.project : undefined; @@ -318,6 +352,7 @@ export class Cluster extends pulumi.CustomResource { resourceInputs["zoneDistributionConfig"] = args ? 
args.zoneDistributionConfig : undefined; resourceInputs["createTime"] = undefined /*out*/; resourceInputs["discoveryEndpoints"] = undefined /*out*/; + resourceInputs["maintenanceSchedules"] = undefined /*out*/; resourceInputs["preciseSizeGb"] = undefined /*out*/; resourceInputs["pscConnections"] = undefined /*out*/; resourceInputs["sizeGb"] = undefined /*out*/; @@ -358,6 +393,15 @@ export interface ClusterState { * Structure is documented below. */ discoveryEndpoints?: pulumi.Input[]>; + /** + * Maintenance policy for a cluster + */ + maintenancePolicy?: pulumi.Input; + /** + * Upcoming maintenance schedule. + * Structure is documented below. + */ + maintenanceSchedules?: pulumi.Input[]>; /** * Unique name of the resource in this scope including project and location using the form: * projects/{projectId}/locations/{locationId}/clusters/{clusterId} @@ -447,6 +491,10 @@ export interface ClusterArgs { * operation will fail. Default value is true. */ deletionProtectionEnabled?: pulumi.Input; + /** + * Maintenance policy for a cluster + */ + maintenancePolicy?: pulumi.Input; /** * Unique name of the resource in this scope including project and location using the form: * projects/{projectId}/locations/{locationId}/clusters/{clusterId} diff --git a/sdk/nodejs/securitycenter/index.ts b/sdk/nodejs/securitycenter/index.ts index d69e8d19ff..d4456e9d1c 100644 --- a/sdk/nodejs/securitycenter/index.ts +++ b/sdk/nodejs/securitycenter/index.ts @@ -120,6 +120,11 @@ export type V2FolderNotificationConfig = import("./v2folderNotificationConfig"). export const V2FolderNotificationConfig: typeof import("./v2folderNotificationConfig").V2FolderNotificationConfig = null as any; utilities.lazyLoad(exports, ["V2FolderNotificationConfig"], () => require("./v2folderNotificationConfig")); +export { V2FolderSccBigQueryExportArgs, V2FolderSccBigQueryExportState } from "./v2folderSccBigQueryExport"; +export type V2FolderSccBigQueryExport = import("./v2folderSccBigQueryExport").V2FolderSccBigQueryExport; +export const V2FolderSccBigQueryExport: typeof import("./v2folderSccBigQueryExport").V2FolderSccBigQueryExport = null as any; +utilities.lazyLoad(exports, ["V2FolderSccBigQueryExport"], () => require("./v2folderSccBigQueryExport")); + export { V2OrganizationMuteConfigArgs, V2OrganizationMuteConfigState } from "./v2organizationMuteConfig"; export type V2OrganizationMuteConfig = import("./v2organizationMuteConfig").V2OrganizationMuteConfig; export const V2OrganizationMuteConfig: typeof import("./v2organizationMuteConfig").V2OrganizationMuteConfig = null as any; @@ -165,6 +170,11 @@ export type V2ProjectNotificationConfig = import("./v2projectNotificationConfig" export const V2ProjectNotificationConfig: typeof import("./v2projectNotificationConfig").V2ProjectNotificationConfig = null as any; utilities.lazyLoad(exports, ["V2ProjectNotificationConfig"], () => require("./v2projectNotificationConfig")); +export { V2ProjectSccBigQueryExportArgs, V2ProjectSccBigQueryExportState } from "./v2projectSccBigQueryExport"; +export type V2ProjectSccBigQueryExport = import("./v2projectSccBigQueryExport").V2ProjectSccBigQueryExport; +export const V2ProjectSccBigQueryExport: typeof import("./v2projectSccBigQueryExport").V2ProjectSccBigQueryExport = null as any; +utilities.lazyLoad(exports, ["V2ProjectSccBigQueryExport"], () => require("./v2projectSccBigQueryExport")); + const _module = { version: utilities.getVersion(), @@ -212,6 +222,8 @@ const _module = { return new V2FolderMuteConfig(name, undefined, { urn }) case 
"gcp:securitycenter/v2FolderNotificationConfig:V2FolderNotificationConfig": return new V2FolderNotificationConfig(name, undefined, { urn }) + case "gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport": + return new V2FolderSccBigQueryExport(name, undefined, { urn }) case "gcp:securitycenter/v2OrganizationMuteConfig:V2OrganizationMuteConfig": return new V2OrganizationMuteConfig(name, undefined, { urn }) case "gcp:securitycenter/v2OrganizationNotificationConfig:V2OrganizationNotificationConfig": @@ -230,6 +242,8 @@ const _module = { return new V2ProjectMuteConfig(name, undefined, { urn }) case "gcp:securitycenter/v2ProjectNotificationConfig:V2ProjectNotificationConfig": return new V2ProjectNotificationConfig(name, undefined, { urn }) + case "gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport": + return new V2ProjectSccBigQueryExport(name, undefined, { urn }) default: throw new Error(`unknown resource type ${type}`); } @@ -256,6 +270,7 @@ pulumi.runtime.registerResourceModule("gcp", "securitycenter/sourceIamMember", _ pulumi.runtime.registerResourceModule("gcp", "securitycenter/sourceIamPolicy", _module) pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2FolderMuteConfig", _module) pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2FolderNotificationConfig", _module) +pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2FolderSccBigQueryExport", _module) pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2OrganizationMuteConfig", _module) pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2OrganizationNotificationConfig", _module) pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2OrganizationSccBigQueryExports", _module) @@ -265,3 +280,4 @@ pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2OrganizationSourc pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2OrganizationSourceIamPolicy", _module) pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2ProjectMuteConfig", _module) pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2ProjectNotificationConfig", _module) +pulumi.runtime.registerResourceModule("gcp", "securitycenter/v2ProjectSccBigQueryExport", _module) diff --git a/sdk/nodejs/securitycenter/v2folderSccBigQueryExport.ts b/sdk/nodejs/securitycenter/v2folderSccBigQueryExport.ts new file mode 100644 index 0000000000..bccfd80500 --- /dev/null +++ b/sdk/nodejs/securitycenter/v2folderSccBigQueryExport.ts @@ -0,0 +1,365 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as utilities from "../utilities"; + +/** + * A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + * It represents exporting Security Command Center data, including assets, findings, and security marks + * using gcloud scc bqexports + * > **Note:** In order to use Cloud SCC resources, your organization must be enrolled + * in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + * Without doing so, you may run into errors during resource creation. 
+ * + * To get more information about FolderSccBigQueryExport, see: + * + * * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports) + * * How-to Guides + * * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) + * + * ## Example Usage + * + * ### Scc V2 Folder Big Query Export Config Basic + * + * ```typescript + * import * as pulumi from "@pulumi/pulumi"; + * import * as gcp from "@pulumi/gcp"; + * + * const folder = new gcp.organizations.Folder("folder", { + * parent: "organizations/123456789", + * displayName: "folder-name", + * deletionProtection: false, + * }); + * const _default = new gcp.bigquery.Dataset("default", { + * datasetId: "my_dataset_id", + * friendlyName: "test", + * description: "This is a test description", + * location: "US", + * defaultTableExpirationMs: 3600000, + * defaultPartitionExpirationMs: undefined, + * labels: { + * env: "default", + * }, + * }); + * const customBigQueryExportConfig = new gcp.securitycenter.V2FolderSccBigQueryExport("custom_big_query_export_config", { + * bigQueryExportId: "my-export", + * folder: folder.folderId, + * dataset: _default.id, + * location: "global", + * description: "Cloud Security Command Center Findings Big Query Export Config", + * filter: "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", + * }); + * ``` + * + * ## Import + * + * FolderSccBigQueryExport can be imported using any of these accepted formats: + * + * * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` + * + * * `{{folder}}/{{location}}/{{big_query_export_id}}` + * + * When using the `pulumi import` command, FolderSccBigQueryExport can be imported using one of the formats above. For example: + * + * ```sh + * $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default {{folder}}/{{location}}/{{big_query_export_id}} + * ``` + */ +export class V2FolderSccBigQueryExport extends pulumi.CustomResource { + /** + * Get an existing V2FolderSccBigQueryExport resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state Any extra arguments used during the lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, state?: V2FolderSccBigQueryExportState, opts?: pulumi.CustomResourceOptions): V2FolderSccBigQueryExport { + return new V2FolderSccBigQueryExport(name, state, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport'; + + /** + * Returns true if the given object is an instance of V2FolderSccBigQueryExport. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is V2FolderSccBigQueryExport { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === V2FolderSccBigQueryExport.__pulumiType; + } + + /** + * This must be unique within the organization. 
It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * + * - - - + */ + public readonly bigQueryExportId!: pulumi.Output; + /** + * The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + */ + public /*out*/ readonly createTime!: pulumi.Output; + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + */ + public readonly dataset!: pulumi.Output; + /** + * The description of the notification config (max of 1024 characters). + */ + public readonly description!: pulumi.Output; + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * >, <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + */ + public readonly filter!: pulumi.Output; + /** + * The folder where Cloud Security Command Center Big Query Export + * Config lives in. + */ + public readonly folder!: pulumi.Output; + /** + * The BigQuery export configuration is stored in this location. If not provided, Use global as default. + */ + public readonly location!: pulumi.Output; + /** + * Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + */ + public /*out*/ readonly mostRecentEditor!: pulumi.Output; + /** + * The resource name of this export, in the format + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + */ + public /*out*/ readonly name!: pulumi.Output; + /** + * The service account that needs permission to create table and upload data to the BigQuery dataset. + */ + public /*out*/ readonly principal!: pulumi.Output; + /** + * The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + */ + public /*out*/ readonly updateTime!: pulumi.Output; + + /** + * Create a V2FolderSccBigQueryExport resource with the given unique name, arguments, and options. 
+ * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: V2FolderSccBigQueryExportArgs, opts?: pulumi.CustomResourceOptions) + constructor(name: string, argsOrState?: V2FolderSccBigQueryExportArgs | V2FolderSccBigQueryExportState, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (opts.id) { + const state = argsOrState as V2FolderSccBigQueryExportState | undefined; + resourceInputs["bigQueryExportId"] = state ? state.bigQueryExportId : undefined; + resourceInputs["createTime"] = state ? state.createTime : undefined; + resourceInputs["dataset"] = state ? state.dataset : undefined; + resourceInputs["description"] = state ? state.description : undefined; + resourceInputs["filter"] = state ? state.filter : undefined; + resourceInputs["folder"] = state ? state.folder : undefined; + resourceInputs["location"] = state ? state.location : undefined; + resourceInputs["mostRecentEditor"] = state ? state.mostRecentEditor : undefined; + resourceInputs["name"] = state ? state.name : undefined; + resourceInputs["principal"] = state ? state.principal : undefined; + resourceInputs["updateTime"] = state ? state.updateTime : undefined; + } else { + const args = argsOrState as V2FolderSccBigQueryExportArgs | undefined; + if ((!args || args.bigQueryExportId === undefined) && !opts.urn) { + throw new Error("Missing required property 'bigQueryExportId'"); + } + if ((!args || args.folder === undefined) && !opts.urn) { + throw new Error("Missing required property 'folder'"); + } + resourceInputs["bigQueryExportId"] = args ? args.bigQueryExportId : undefined; + resourceInputs["dataset"] = args ? args.dataset : undefined; + resourceInputs["description"] = args ? args.description : undefined; + resourceInputs["filter"] = args ? args.filter : undefined; + resourceInputs["folder"] = args ? args.folder : undefined; + resourceInputs["location"] = args ? args.location : undefined; + resourceInputs["createTime"] = undefined /*out*/; + resourceInputs["mostRecentEditor"] = undefined /*out*/; + resourceInputs["name"] = undefined /*out*/; + resourceInputs["principal"] = undefined /*out*/; + resourceInputs["updateTime"] = undefined /*out*/; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + super(V2FolderSccBigQueryExport.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * Input properties used for looking up and filtering V2FolderSccBigQueryExport resources. + */ +export interface V2FolderSccBigQueryExportState { + /** + * This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * + * - - - + */ + bigQueryExportId?: pulumi.Input; + /** + * The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + */ + createTime?: pulumi.Input; + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". 
+ * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + */ + dataset?: pulumi.Input; + /** + * The description of the notification config (max of 1024 characters). + */ + description?: pulumi.Input; + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * >, <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + */ + filter?: pulumi.Input; + /** + * The folder where Cloud Security Command Center Big Query Export + * Config lives in. + */ + folder?: pulumi.Input; + /** + * The BigQuery export configuration is stored in this location. If not provided, Use global as default. + */ + location?: pulumi.Input; + /** + * Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + */ + mostRecentEditor?: pulumi.Input; + /** + * The resource name of this export, in the format + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + */ + name?: pulumi.Input; + /** + * The service account that needs permission to create table and upload data to the BigQuery dataset. + */ + principal?: pulumi.Input; + /** + * The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + */ + updateTime?: pulumi.Input; +} + +/** + * The set of arguments for constructing a V2FolderSccBigQueryExport resource. + */ +export interface V2FolderSccBigQueryExportArgs { + /** + * This must be unique within the organization. It must consist of only lowercase letters, + * numbers, and hyphens, must start with a letter, must end with either a letter or a number, + * and must be 63 characters or less. + * + * + * - - - + */ + bigQueryExportId: pulumi.Input; + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + */ + dataset?: pulumi.Input; + /** + * The description of the notification config (max of 1024 characters). + */ + description?: pulumi.Input; + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. 
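[editor's note] The filter grammar and argument shapes above are easiest to read in context. The sketch below wires a folder-level export to a BigQuery dataset; the folder number, dataset, and export IDs are placeholders, and the argument names follow the `V2FolderSccBigQueryExportArgs` interface defined in this file (it mirrors the organization-level example further down, so treat it as illustrative rather than the provider's documented example).

```typescript
import * as gcp from "@pulumi/gcp";

// Dataset that will receive findings updates (placeholder IDs throughout).
const dataset = new gcp.bigquery.Dataset("scc-export-dataset", {
    datasetId: "scc_findings_export",
    location: "US",
});

// Folder-level export of active, unmuted findings, using the filter grammar
// documented above (AND/OR, =, :, parentheses, "-" for negation).
const folderExport = new gcp.securitycenter.V2FolderSccBigQueryExport("folder-export", {
    bigQueryExportId: "folder-findings-export",
    folder: "123456789",              // numeric folder ID (placeholder)
    dataset: dataset.id,              // resolves to projects/{project}/datasets/{dataset_id}
    location: "global",               // global is also the default when omitted
    description: "Folder-level SCC findings export",
    filter: "state=\"ACTIVE\" AND NOT mute=\"MUTED\"",
});
```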
+ * Restrictions have the form and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * >, <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + */ + filter?: pulumi.Input; + /** + * The folder where Cloud Security Command Center Big Query Export + * Config lives in. + */ + folder: pulumi.Input; + /** + * The BigQuery export configuration is stored in this location. If not provided, Use global as default. + */ + location?: pulumi.Input; +} diff --git a/sdk/nodejs/securitycenter/v2organizationSccBigQueryExports.ts b/sdk/nodejs/securitycenter/v2organizationSccBigQueryExports.ts index 0196f6c0b0..1c5b233f76 100644 --- a/sdk/nodejs/securitycenter/v2organizationSccBigQueryExports.ts +++ b/sdk/nodejs/securitycenter/v2organizationSccBigQueryExports.ts @@ -27,7 +27,7 @@ import * as utilities from "../utilities"; * import * as gcp from "@pulumi/gcp"; * * const _default = new gcp.bigquery.Dataset("default", { - * datasetId: "my_dataset_id", + * datasetId: "", * friendlyName: "test", * description: "This is a test description", * location: "US", @@ -41,7 +41,7 @@ import * as utilities from "../utilities"; * name: "my-export", * bigQueryExportId: "my-export", * organization: "123456789", - * dataset: "my-dataset", + * dataset: _default.id, * location: "global", * description: "Cloud Security Command Center Findings Big Query Export Config", * filter: "state=\"ACTIVE\" AND NOT mute=\"MUTED\"", diff --git a/sdk/nodejs/securitycenter/v2projectSccBigQueryExport.ts b/sdk/nodejs/securitycenter/v2projectSccBigQueryExport.ts new file mode 100644 index 0000000000..a2e608d2c8 --- /dev/null +++ b/sdk/nodejs/securitycenter/v2projectSccBigQueryExport.ts @@ -0,0 +1,330 @@ +// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +// *** Do not edit by hand unless you're certain you know what you are doing! *** + +import * as pulumi from "@pulumi/pulumi"; +import * as utilities from "../utilities"; + +/** + * A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + * It represents exporting Security Command Center data, including assets, findings, and security marks + * using gcloud scc bqexports + * > **Note:** In order to use Cloud SCC resources, your organization must be enrolled + * in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + * Without doing so, you may run into errors during resource creation. 
+ * + * To get more information about ProjectSccBigQueryExport, see: + * + * * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.bigQueryExports) + * * How-to Guides + * * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) + * + * ## Example Usage + * + * ## Import + * + * ProjectSccBigQueryExport can be imported using any of these accepted formats: + * + * * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` + * + * * `{{project}}/{{location}}/{{big_query_export_id}}` + * + * * `{{location}}/{{big_query_export_id}}` + * + * When using the `pulumi import` command, ProjectSccBigQueryExport can be imported using one of the formats above. For example: + * + * ```sh + * $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{project}}/{{location}}/{{big_query_export_id}} + * ``` + * + * ```sh + * $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{location}}/{{big_query_export_id}} + * ``` + */ +export class V2ProjectSccBigQueryExport extends pulumi.CustomResource { + /** + * Get an existing V2ProjectSccBigQueryExport resource's state with the given name, ID, and optional extra + * properties used to qualify the lookup. + * + * @param name The _unique_ name of the resulting resource. + * @param id The _unique_ provider ID of the resource to lookup. + * @param state Any extra arguments used during the lookup. + * @param opts Optional settings to control the behavior of the CustomResource. + */ + public static get(name: string, id: pulumi.Input, state?: V2ProjectSccBigQueryExportState, opts?: pulumi.CustomResourceOptions): V2ProjectSccBigQueryExport { + return new V2ProjectSccBigQueryExport(name, state, { ...opts, id: id }); + } + + /** @internal */ + public static readonly __pulumiType = 'gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport'; + + /** + * Returns true if the given object is an instance of V2ProjectSccBigQueryExport. This is designed to work even + * when multiple copies of the Pulumi SDK have been loaded into the same process. + */ + public static isInstance(obj: any): obj is V2ProjectSccBigQueryExport { + if (obj === undefined || obj === null) { + return false; + } + return obj['__pulumiType'] === V2ProjectSccBigQueryExport.__pulumiType; + } + + /** + * This must be unique within the organization. + * + * + * - - - + */ + public readonly bigQueryExportId!: pulumi.Output; + /** + * The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + */ + public /*out*/ readonly createTime!: pulumi.Output; + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + */ + public readonly dataset!: pulumi.Output; + /** + * The description of the notification config (max of 1024 characters). 
+ */ + public readonly description!: pulumi.Output; + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * >, <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + */ + public readonly filter!: pulumi.Output; + /** + * location Id is provided by organization. If not provided, Use global as default. + */ + public readonly location!: pulumi.Output; + /** + * Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + */ + public /*out*/ readonly mostRecentEditor!: pulumi.Output; + /** + * The resource name of this export, in the format + * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + */ + public /*out*/ readonly name!: pulumi.Output; + /** + * The service account that needs permission to create table and upload data to the BigQuery dataset. + */ + public /*out*/ readonly principal!: pulumi.Output; + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + */ + public readonly project!: pulumi.Output; + /** + * The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + */ + public /*out*/ readonly updateTime!: pulumi.Output; + + /** + * Create a V2ProjectSccBigQueryExport resource with the given unique name, arguments, and options. + * + * @param name The _unique_ name of the resource. + * @param args The arguments to use to populate this resource's properties. + * @param opts A bag of options that control this resource's behavior. + */ + constructor(name: string, args: V2ProjectSccBigQueryExportArgs, opts?: pulumi.CustomResourceOptions) + constructor(name: string, argsOrState?: V2ProjectSccBigQueryExportArgs | V2ProjectSccBigQueryExportState, opts?: pulumi.CustomResourceOptions) { + let resourceInputs: pulumi.Inputs = {}; + opts = opts || {}; + if (opts.id) { + const state = argsOrState as V2ProjectSccBigQueryExportState | undefined; + resourceInputs["bigQueryExportId"] = state ? state.bigQueryExportId : undefined; + resourceInputs["createTime"] = state ? state.createTime : undefined; + resourceInputs["dataset"] = state ? state.dataset : undefined; + resourceInputs["description"] = state ? state.description : undefined; + resourceInputs["filter"] = state ? state.filter : undefined; + resourceInputs["location"] = state ? 
state.location : undefined; + resourceInputs["mostRecentEditor"] = state ? state.mostRecentEditor : undefined; + resourceInputs["name"] = state ? state.name : undefined; + resourceInputs["principal"] = state ? state.principal : undefined; + resourceInputs["project"] = state ? state.project : undefined; + resourceInputs["updateTime"] = state ? state.updateTime : undefined; + } else { + const args = argsOrState as V2ProjectSccBigQueryExportArgs | undefined; + if ((!args || args.bigQueryExportId === undefined) && !opts.urn) { + throw new Error("Missing required property 'bigQueryExportId'"); + } + resourceInputs["bigQueryExportId"] = args ? args.bigQueryExportId : undefined; + resourceInputs["dataset"] = args ? args.dataset : undefined; + resourceInputs["description"] = args ? args.description : undefined; + resourceInputs["filter"] = args ? args.filter : undefined; + resourceInputs["location"] = args ? args.location : undefined; + resourceInputs["project"] = args ? args.project : undefined; + resourceInputs["createTime"] = undefined /*out*/; + resourceInputs["mostRecentEditor"] = undefined /*out*/; + resourceInputs["name"] = undefined /*out*/; + resourceInputs["principal"] = undefined /*out*/; + resourceInputs["updateTime"] = undefined /*out*/; + } + opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); + super(V2ProjectSccBigQueryExport.__pulumiType, name, resourceInputs, opts); + } +} + +/** + * Input properties used for looking up and filtering V2ProjectSccBigQueryExport resources. + */ +export interface V2ProjectSccBigQueryExportState { + /** + * This must be unique within the organization. + * + * + * - - - + */ + bigQueryExportId?: pulumi.Input; + /** + * The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + */ + createTime?: pulumi.Input; + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + */ + dataset?: pulumi.Input; + /** + * The description of the notification config (max of 1024 characters). + */ + description?: pulumi.Input; + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * >, <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + */ + filter?: pulumi.Input; + /** + * location Id is provided by organization. If not provided, Use global as default. 
+ */ + location?: pulumi.Input; + /** + * Email address of the user who last edited the BigQuery export. + * This field is set by the server and will be ignored if provided on export creation or update. + */ + mostRecentEditor?: pulumi.Input; + /** + * The resource name of this export, in the format + * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + * This field is provided in responses, and is ignored when provided in create requests. + */ + name?: pulumi.Input; + /** + * The service account that needs permission to create table and upload data to the BigQuery dataset. + */ + principal?: pulumi.Input; + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. + */ + project?: pulumi.Input; + /** + * The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + * Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + */ + updateTime?: pulumi.Input; +} + +/** + * The set of arguments for constructing a V2ProjectSccBigQueryExport resource. + */ +export interface V2ProjectSccBigQueryExportArgs { + /** + * This must be unique within the organization. + * + * + * - - - + */ + bigQueryExportId: pulumi.Input; + /** + * The dataset to write findings' updates to. + * Its format is "projects/[projectId]/datasets/[bigqueryDatasetId]". + * BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + */ + dataset?: pulumi.Input; + /** + * The description of the notification config (max of 1024 characters). + */ + description?: pulumi.Input; + /** + * Expression that defines the filter to apply across create/update + * events of findings. The + * expression is a list of zero or more restrictions combined via + * logical operators AND and OR. Parentheses are supported, and OR + * has higher precedence than AND. + * Restrictions have the form and may have + * a - character in front of them to indicate negation. The fields + * map to those defined in the corresponding resource. + * The supported operators are: + * * = for all value types. + * * >, <, >=, <= for integer values. + * * :, meaning substring matching, for strings. + * The supported value types are: + * * string literals in quotes. + * * integer literals without quotes. + * * boolean literals true and false without quotes. + * See + * [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + * for information on how to write a filter. + */ + filter?: pulumi.Input; + /** + * location Id is provided by organization. If not provided, Use global as default. + */ + location?: pulumi.Input; + /** + * The ID of the project in which the resource belongs. + * If it is not provided, the provider project is used. 
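[editor's note] As a usage note on the `name` format documented above (`projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`): that same string is the ID accepted by the class's static `get` method. A small sketch with placeholder identifiers:

```typescript
import * as gcp from "@pulumi/gcp";

// Adopt the state of an export that already exists in Security Command Center.
const existing = gcp.securitycenter.V2ProjectSccBigQueryExport.get(
    "existing-export",
    "projects/my-project-id/locations/global/bigQueryExports/project-findings-export",
);

// Server-populated outputs such as principal and mostRecentEditor are then readable.
export const exportPrincipal = existing.principal;
```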
+ */ + project?: pulumi.Input; +} diff --git a/sdk/nodejs/tsconfig.json b/sdk/nodejs/tsconfig.json index 8808f63758..ea1ec0435a 100644 --- a/sdk/nodejs/tsconfig.json +++ b/sdk/nodejs/tsconfig.json @@ -124,6 +124,7 @@ "artifactregistry/vpcscConfig.ts", "assuredworkloads/index.ts", "assuredworkloads/workload.ts", + "backupdisasterrecovery/backupVault.ts", "backupdisasterrecovery/getManagementServer.ts", "backupdisasterrecovery/index.ts", "backupdisasterrecovery/managementServer.ts", @@ -232,6 +233,7 @@ "certificatemanager/certificateMapEntry.ts", "certificatemanager/dnsAuthorization.ts", "certificatemanager/getCertificateMap.ts", + "certificatemanager/getCertificates.ts", "certificatemanager/index.ts", "certificatemanager/trustConfig.ts", "cloudasset/folderFeed.ts", @@ -947,6 +949,8 @@ "kms/ekmConnectionIamMember.ts", "kms/ekmConnectionIamPolicy.ts", "kms/getCryptoKeyIamPolicy.ts", + "kms/getCryptoKeyLatestVersion.ts", + "kms/getCryptoKeyVersions.ts", "kms/getCryptoKeys.ts", "kms/getEkmConnectionIamPolicy.ts", "kms/getKMSCryptoKey.ts", @@ -1222,6 +1226,7 @@ "securitycenter/sourceIamPolicy.ts", "securitycenter/v2folderMuteConfig.ts", "securitycenter/v2folderNotificationConfig.ts", + "securitycenter/v2folderSccBigQueryExport.ts", "securitycenter/v2organizationMuteConfig.ts", "securitycenter/v2organizationNotificationConfig.ts", "securitycenter/v2organizationSccBigQueryExports.ts", @@ -1231,6 +1236,7 @@ "securitycenter/v2organizationSourceIamPolicy.ts", "securitycenter/v2projectMuteConfig.ts", "securitycenter/v2projectNotificationConfig.ts", + "securitycenter/v2projectSccBigQueryExport.ts", "securityposture/index.ts", "securityposture/posture.ts", "securityposture/postureDeployment.ts", diff --git a/sdk/nodejs/types/input.ts b/sdk/nodejs/types/input.ts index f802c2d11f..4a6cab3659 100644 --- a/sdk/nodejs/types/input.ts +++ b/sdk/nodejs/types/input.ts @@ -2309,6 +2309,25 @@ export namespace alloydb { primaryClusterName: pulumi.Input; } + export interface ClusterTrialMetadata { + /** + * End time of the trial cluster. + */ + endTime?: pulumi.Input; + /** + * Grace end time of the trial cluster. + */ + graceEndTime?: pulumi.Input; + /** + * Start time of the trial cluster. + */ + startTime?: pulumi.Input; + /** + * Upgrade time of the trial cluster to standard cluster. + */ + upgradeTime?: pulumi.Input; + } + export interface InstanceClientConnectionConfig { /** * Configuration to enforce connectors only (ex: AuthProxy) connections to the database. @@ -5192,6 +5211,13 @@ export namespace bigquery { enableFailureEmail: pulumi.Input; } + export interface DataTransferConfigEncryptionConfiguration { + /** + * The name of the KMS key used for encrypting BigQuery data. + */ + kmsKeyName: pulumi.Input; + } + export interface DataTransferConfigScheduleOptions { /** * If true, automatic scheduling of data transfer runs for this @@ -6643,13 +6669,42 @@ export namespace bigqueryanalyticshub { title: pulumi.Input; } + export interface DataExchangeSharingEnvironmentConfig { + /** + * Data Clean Room (DCR), used for privacy-safe and secured data sharing. + */ + dcrExchangeConfig?: pulumi.Input; + /** + * Default Analytics Hub data exchange, used for secured data sharing. + */ + defaultExchangeConfig?: pulumi.Input; + } + + export interface DataExchangeSharingEnvironmentConfigDcrExchangeConfig { + } + + export interface DataExchangeSharingEnvironmentConfigDefaultExchangeConfig { + } + export interface ListingBigqueryDataset { /** * Resource name of the dataset source for this listing. e.g. 
projects/myproject/datasets/123 + */ + dataset: pulumi.Input; + /** + * Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + * Structure is documented below. + */ + selectedResources?: pulumi.Input[]>; + } + + export interface ListingBigqueryDatasetSelectedResource { + /** + * Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" * * - - - */ - dataset: pulumi.Input; + table?: pulumi.Input; } export interface ListingDataProvider { @@ -6691,6 +6746,11 @@ export namespace bigqueryanalyticshub { * If true, enable restricted export. */ enabled?: pulumi.Input; + /** + * (Output) + * If true, restrict direct table access(read api/tabledata.list) on linked table. + */ + restrictDirectTableAccess?: pulumi.Input; /** * If true, restrict export of query result derived from restricted linked dataset table. */ @@ -6872,6 +6932,10 @@ export namespace bigtable { * The name of the column family. */ family: pulumi.Input; + /** + * The type of the column family. + */ + type?: pulumi.Input; } export interface TableIamBindingCondition { @@ -10458,11 +10522,11 @@ export namespace cloudbuild { export interface WorkerPoolWorkerConfig { /** - * Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + * Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. */ diskSizeGb?: pulumi.Input; /** - * Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + * Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. */ machineType?: pulumi.Input; /** @@ -13158,8 +13222,7 @@ export namespace cloudrun { name: pulumi.Input; /** * A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * Structure is documented below. */ nfs?: pulumi.Input; @@ -13176,8 +13239,7 @@ export namespace cloudrun { /** * Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" */ driver: pulumi.Input; /** @@ -13585,7 +13647,7 @@ export namespace cloudrunv2 { */ emptyDir?: pulumi.Input; /** - * Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * Cloud Storage bucket mounted as a volume using GCSFuse. * Structure is documented below. 
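[editor's note] The new `selectedResources` field on `ListingBigqueryDataset` above is what lets a data clean room listing share a single table. A hedged sketch, with placeholder project/dataset/table IDs, combining it with the new `sharingEnvironmentConfig` on `DataExchange`:

```typescript
import * as gcp from "@pulumi/gcp";

// A data clean room exchange (dcrExchangeConfig is an empty block that selects the DCR environment).
const exchange = new gcp.bigqueryanalyticshub.DataExchange("dcr-exchange", {
    dataExchangeId: "my_dcr_exchange",
    location: "US",
    displayName: "my_dcr_exchange",
    sharingEnvironmentConfig: {
        dcrExchangeConfig: {},
    },
});

// Listing that shares only one table from the dataset (placeholder resource names).
const listing = new gcp.bigqueryanalyticshub.Listing("dcr-listing", {
    listingId: "my_dcr_listing",
    dataExchangeId: exchange.dataExchangeId,
    location: "US",
    displayName: "my_dcr_listing",
    bigqueryDataset: {
        dataset: "projects/my-project/datasets/shared_dataset",
        selectedResources: [{
            table: "projects/my-project/datasets/shared_dataset/tables/shared_table",
        }],
    },
    restrictedExportConfig: {
        enabled: true,
        restrictQueryResult: true,
    },
});
```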
*/ gcs?: pulumi.Input; @@ -13594,7 +13656,7 @@ export namespace cloudrunv2 { */ name: pulumi.Input; /** - * NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * NFS share mounted as a volume. * Structure is documented below. */ nfs?: pulumi.Input; @@ -13889,6 +13951,11 @@ export namespace cloudrunv2 { * Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. */ serviceAccount?: pulumi.Input; + /** + * Enables Cloud Service Mesh for this Revision. + * Structure is documented below. + */ + serviceMesh?: pulumi.Input; /** * Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity */ @@ -14220,6 +14287,15 @@ export namespace cloudrunv2 { minInstanceCount?: pulumi.Input; } + export interface ServiceTemplateServiceMesh { + /** + * The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + * + * - - - + */ + mesh?: pulumi.Input; + } + export interface ServiceTemplateVolume { /** * For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. @@ -14232,7 +14308,7 @@ export namespace cloudrunv2 { */ emptyDir?: pulumi.Input; /** - * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. */ gcs?: pulumi.Input; @@ -14290,8 +14366,6 @@ export namespace cloudrunv2 { path: pulumi.Input; /** * If true, mount the NFS volume as read only - * - * - - - */ readOnly?: pulumi.Input; /** @@ -14651,6 +14725,151 @@ export namespace cloudtasks { version?: pulumi.Input; } + export interface QueueHttpTarget { + /** + * HTTP target headers. + * This map contains the header field names and values. + * Headers will be set when running the CreateTask and/or BufferTask. + * These headers represent a subset of the headers that will be configured for the task's HTTP request. + * Some HTTP request headers will be ignored or replaced. + * Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + * Structure is documented below. + */ + headerOverrides?: pulumi.Input[]>; + /** + * The HTTP method to use for the request. + * When specified, it overrides HttpRequest for the task. + * Note that if the value is set to GET the body of the task will be ignored at execution time. + * Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + */ + httpMethod?: pulumi.Input; + /** + * If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + * This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. 
+ * Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + * Structure is documented below. + */ + oauthToken?: pulumi.Input; + /** + * If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + * This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + * Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + * Structure is documented below. + */ + oidcToken?: pulumi.Input; + /** + * URI override. + * When specified, overrides the execution URI for all the tasks in the queue. + * Structure is documented below. + */ + uriOverride?: pulumi.Input; + } + + export interface QueueHttpTargetHeaderOverride { + /** + * Header embodying a key and a value. + * Structure is documented below. + */ + header: pulumi.Input; + } + + export interface QueueHttpTargetHeaderOverrideHeader { + /** + * The Key of the header. + */ + key: pulumi.Input; + /** + * The Value of the header. + */ + value: pulumi.Input; + } + + export interface QueueHttpTargetOauthToken { + /** + * OAuth scope to be used for generating OAuth access token. + * If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + */ + scope?: pulumi.Input; + /** + * Service account email to be used for generating OAuth token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + */ + serviceAccountEmail: pulumi.Input; + } + + export interface QueueHttpTargetOidcToken { + /** + * Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + */ + audience?: pulumi.Input; + /** + * Service account email to be used for generating OIDC token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + */ + serviceAccountEmail: pulumi.Input; + } + + export interface QueueHttpTargetUriOverride { + /** + * Host override. + * When specified, replaces the host part of the task URL. + * For example, if the task URL is "https://www.google.com", and host value + * is set to "example.net", the overridden URI will be changed to "https://example.net". + * Host value cannot be an empty string (INVALID_ARGUMENT). + */ + host?: pulumi.Input; + /** + * URI path. + * When specified, replaces the existing path of the task URL. + * Setting the path value to an empty string clears the URI path segment. + * Structure is documented below. + */ + pathOverride?: pulumi.Input; + /** + * Port override. + * When specified, replaces the port part of the task URI. + * For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + * Note that the port value must be a positive integer. + * Setting the port to 0 (Zero) clears the URI port. + */ + port?: pulumi.Input; + /** + * URI query. + * When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + * Structure is documented below. + */ + queryOverride?: pulumi.Input; + /** + * Scheme override. + * When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + * Possible values are: `HTTP`, `HTTPS`. 
+ */ + scheme?: pulumi.Input; + /** + * URI Override Enforce Mode + * When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + * Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + */ + uriOverrideEnforceMode?: pulumi.Input; + } + + export interface QueueHttpTargetUriOverridePathOverride { + /** + * The URI path (e.g., /users/1234). Default is an empty string. + */ + path?: pulumi.Input; + } + + export interface QueueHttpTargetUriOverrideQueryOverride { + /** + * The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + */ + queryParams?: pulumi.Input; + } + export interface QueueIamBindingCondition { description?: pulumi.Input; expression: pulumi.Input; @@ -16341,7 +16560,7 @@ export namespace compute { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. */ ports?: pulumi.Input[]>; @@ -16360,7 +16579,7 @@ export namespace compute { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. */ ports?: pulumi.Input[]>; @@ -16979,6 +17198,10 @@ export namespace compute { * Structure is documented below. */ initializeParams?: pulumi.Input; + /** + * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + */ + interface?: pulumi.Input; /** * The selfLink of the encryption key that is * stored in Google Cloud KMS to encrypt this disk. Only one of `kmsKeySelfLink` @@ -17137,6 +17360,10 @@ export namespace compute { * Parameters with which a disk was created alongside the instance. */ initializeParams?: pulumi.Input; + /** + * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + */ + interface?: pulumi.Input; /** * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. */ @@ -17571,6 +17798,10 @@ export namespace compute { * Parameters with which a disk was created alongside the instance. */ initializeParams?: pulumi.Input; + /** + * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + */ + interface?: pulumi.Input; /** * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. */ @@ -18309,7 +18540,7 @@ export namespace compute { subnetwork?: pulumi.Input; /** * The project in which the subnetwork belongs. - * If the `subnetwork` is a self_link, this field is ignored in favor of the project + * If the `subnetwork` is a self_link, this field is set to the project * defined in the subnetwork self_link. If the `subnetwork` is a name and this * field is not provided, the provider project is used. 
*/ @@ -19489,6 +19720,19 @@ export namespace compute { projectId: pulumi.Input; } + export interface NodeTemplateAccelerator { + /** + * The number of the guest accelerator cards exposed to this + * node template. + */ + acceleratorCount?: pulumi.Input; + /** + * Full or partial URL of the accelerator type resource to expose + * to this node template. + */ + acceleratorType?: pulumi.Input; + } + export interface NodeTemplateNodeTypeFlexibility { /** * Number of virtual CPUs to use. @@ -29635,6 +29879,10 @@ export namespace container { * One of `"none"` or `"static"`. Defaults to `none` when `kubeletConfig` is unset. */ cpuManagerPolicy: pulumi.Input; + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled?: pulumi.Input; /** * Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. */ @@ -29861,9 +30109,14 @@ export namespace container { export interface ClusterNodePoolAutoConfig { /** - * The network tag config for the cluster's automatically provisioned node pools. + * The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. */ networkTags?: pulumi.Input; + /** + * Kubelet configuration for Autopilot clusters. Currently, only `insecureKubeletReadonlyPortEnabled` is supported here. + * Structure is documented below. + */ + nodeKubeletConfig?: pulumi.Input; /** * A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. */ @@ -29877,6 +30130,13 @@ export namespace container { tags?: pulumi.Input[]>; } + export interface ClusterNodePoolAutoConfigNodeKubeletConfig { + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled?: pulumi.Input; + } + export interface ClusterNodePoolAutoscaling { /** * Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs. @@ -29916,6 +30176,10 @@ export namespace container { * The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below. */ gcfsConfig?: pulumi.Input; + /** + * Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ */ + insecureKubeletReadonlyPortEnabled?: pulumi.Input; /** * The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. */ @@ -30465,6 +30729,10 @@ export namespace container { * One of `"none"` or `"static"`. Defaults to `none` when `kubeletConfig` is unset. */ cpuManagerPolicy: pulumi.Input; + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled?: pulumi.Input; /** * Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. */ @@ -31341,6 +31609,10 @@ export namespace container { * Control the CPU management policy on the node. */ cpuManagerPolicy: pulumi.Input; + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled?: pulumi.Input; /** * Controls the maximum number of processes allowed to run in a pod. */ @@ -31885,33 +32157,33 @@ export namespace databasemigrationservice { */ cloudSqlId?: pulumi.Input; /** - * Required. The IP or hostname of the source MySQL database. + * The IP or hostname of the source MySQL database. */ - host: pulumi.Input; + host?: pulumi.Input; /** - * Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. */ - password: pulumi.Input; + password?: pulumi.Input; /** * (Output) * Output only. Indicates If this connection profile password is stored. */ passwordSet?: pulumi.Input; /** - * Required. The network port of the source MySQL database. + * The network port of the source MySQL database. */ - port: pulumi.Input; + port?: pulumi.Input; /** * SSL configuration for the destination to connect to the source database. * Structure is documented below. */ ssl?: pulumi.Input; /** - * Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. */ - username: pulumi.Input; + username?: pulumi.Input; } export interface ConnectionProfileMysqlSsl { @@ -32052,43 +32324,47 @@ export namespace databasemigrationservice { } export interface ConnectionProfilePostgresql { + /** + * If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + */ + alloydbClusterId?: pulumi.Input; /** * If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. */ cloudSqlId?: pulumi.Input; /** - * Required. The IP or hostname of the source MySQL database. + * The IP or hostname of the source MySQL database. */ - host: pulumi.Input; + host?: pulumi.Input; /** * (Output) * Output only. 
If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. */ networkArchitecture?: pulumi.Input; /** - * Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. */ - password: pulumi.Input; + password?: pulumi.Input; /** * (Output) * Output only. Indicates If this connection profile password is stored. */ passwordSet?: pulumi.Input; /** - * Required. The network port of the source MySQL database. + * The network port of the source MySQL database. */ - port: pulumi.Input; + port?: pulumi.Input; /** * SSL configuration for the destination to connect to the source database. * Structure is documented below. */ ssl?: pulumi.Input; /** - * Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. */ - username: pulumi.Input; + username?: pulumi.Input; } export interface ConnectionProfilePostgresqlSsl { @@ -36254,6 +36530,11 @@ export namespace dataloss { * Structure is documented below. */ pubSubNotification?: pulumi.Input; + /** + * Publish a message into the Pub/Sub topic. + * Structure is documented below. + */ + tagResources?: pulumi.Input; } export interface PreventionDiscoveryConfigActionExportData { @@ -36335,6 +36616,51 @@ export namespace dataloss { minimumSensitivityScore?: pulumi.Input; } + export interface PreventionDiscoveryConfigActionTagResources { + /** + * Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + */ + lowerDataRiskToLow?: pulumi.Input; + /** + * The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + */ + profileGenerationsToTags?: pulumi.Input[]>; + /** + * The tags to associate with different conditions. + * Structure is documented below. + */ + tagConditions?: pulumi.Input[]>; + } + + export interface PreventionDiscoveryConfigActionTagResourcesTagCondition { + /** + * Conditions attaching the tag to a resource on its profile having this sensitivity score. + * Structure is documented below. + */ + sensitivityScore?: pulumi.Input; + /** + * The tag value to attach to resources. + * Structure is documented below. 
+ */ + tag?: pulumi.Input; + } + + export interface PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore { + /** + * The sensitivity score applied to the resource. + * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + */ + score: pulumi.Input; + } + + export interface PreventionDiscoveryConfigActionTagResourcesTagConditionTag { + /** + * The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + */ + namespacedValue?: pulumi.Input; + } + export interface PreventionDiscoveryConfigError { /** * A list of messages that carry the error details. @@ -36429,6 +36755,11 @@ export namespace dataloss { } export interface PreventionDiscoveryConfigTargetBigQueryTargetCadence { + /** + * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + */ + inspectTemplateModifiedCadence?: pulumi.Input; /** * Governs when to update data profiles when a schema is modified * Structure is documented below. @@ -36441,6 +36772,14 @@ export namespace dataloss { tableModifiedCadence?: pulumi.Input; } + export interface PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence { + /** + * How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + */ + frequency?: pulumi.Input; + } + export interface PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence { /** * Frequency to regenerate data profiles when the schema is modified. Defaults to monthly. @@ -36686,6 +37025,11 @@ export namespace dataloss { } export interface PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence { + /** + * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + */ + inspectTemplateModifiedCadence?: pulumi.Input; /** * Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. @@ -36698,6 +37042,14 @@ export namespace dataloss { schemaModifiedCadence?: pulumi.Input; } + export interface PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence { + /** + * How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + */ + frequency: pulumi.Input; + } + export interface PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence { /** * Frequency to regenerate data profiles when the schema is modified. Defaults to monthly. @@ -41706,7 +42058,7 @@ export namespace dataproc { */ softwareConfig?: pulumi.Input; /** - * A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). */ stagingBucket?: pulumi.Input; /** @@ -41752,7 +42104,7 @@ export namespace dataproc { */ internalIpOnly?: pulumi.Input; /** - * The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + * The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). */ metadata?: pulumi.Input<{[key: string]: pulumi.Input}>; /** @@ -41788,7 +42140,7 @@ export namespace dataproc { */ subnetwork?: pulumi.Input; /** - * The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + * The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). */ tags?: pulumi.Input[]>; /** @@ -41858,26 +42210,26 @@ export namespace dataproc { */ executableFile?: pulumi.Input; /** - * Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + * Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. */ executionTimeout?: pulumi.Input; } export interface WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { /** - * The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). */ autoDeleteTime?: pulumi.Input; /** - * The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. 
Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). */ autoDeleteTtl?: pulumi.Input; /** - * The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + * The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). */ idleDeleteTtl?: pulumi.Input; /** - * Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). */ idleStartTime?: pulumi.Input; } @@ -41912,7 +42264,7 @@ export namespace dataproc { */ managedGroupConfigs?: pulumi.Input[]>; /** - * Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + * Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */ minCpuPlatform?: pulumi.Input; /** @@ -43437,6 +43789,10 @@ export namespace datastream { } export interface StreamSourceConfigSqlServerSourceConfig { + /** + * CDC reader reads from change tables. + */ + changeTables?: pulumi.Input; /** * SQL Server objects to exclude from the stream. * Structure is documented below. @@ -43455,6 +43811,13 @@ export namespace datastream { * Max concurrent CDC tasks. */ maxConcurrentCdcTasks?: pulumi.Input; + /** + * CDC reader reads from transaction logs. + */ + transactionLogs?: pulumi.Input; + } + + export interface StreamSourceConfigSqlServerSourceConfigChangeTables { } export interface StreamSourceConfigSqlServerSourceConfigExcludeObjects { @@ -43604,6 +43967,9 @@ export namespace datastream { */ scale?: pulumi.Input; } + + export interface StreamSourceConfigSqlServerSourceConfigTransactionLogs { + } } export namespace deploymentmanager { @@ -45846,6 +46212,11 @@ export namespace discoveryengine { } export interface DataStoreDocumentProcessingConfig { + /** + * Whether chunking mode is enabled. + * Structure is documented below. + */ + chunkingConfig?: pulumi.Input; /** * Configurations for default Document parser. If not specified, this resource * will be configured to use a default DigitalParsingConfig, and the default parsing @@ -45865,11 +46236,36 @@ export namespace discoveryengine { parsingConfigOverrides?: pulumi.Input[]>; } + export interface DataStoreDocumentProcessingConfigChunkingConfig { + /** + * Configuration for the layout based chunking. + * Structure is documented below. 
+ */ + layoutBasedChunkingConfig?: pulumi.Input; + } + + export interface DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig { + /** + * The token size limit for each chunk. + * Supported values: 100-500 (inclusive). Default value: 500. + */ + chunkSize?: pulumi.Input; + /** + * Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + * Default value: False. + */ + includeAncestorHeadings?: pulumi.Input; + } + export interface DataStoreDocumentProcessingConfigDefaultParsingConfig { /** * Configurations applied to digital parser. */ digitalParsingConfig?: pulumi.Input; + /** + * Configurations applied to layout parser. + */ + layoutParsingConfig?: pulumi.Input; /** * Configurations applied to OCR parser. Currently it only applies to PDFs. * Structure is documented below. @@ -45880,6 +46276,9 @@ export namespace discoveryengine { export interface DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig { } + export interface DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig { + } + export interface DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig { /** * If true, will use native text instead of OCR text on pages containing native text. @@ -45896,6 +46295,10 @@ export namespace discoveryengine { * The identifier for this object. Format specified above. */ fileType: pulumi.Input; + /** + * Configurations applied to layout parser. + */ + layoutParsingConfig?: pulumi.Input; /** * Configurations applied to OCR parser. Currently it only applies to PDFs. * Structure is documented below. @@ -45906,6 +46309,9 @@ export namespace discoveryengine { export interface DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig { } + export interface DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig { + } + export interface DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig { /** * If true, will use native text instead of OCR text on pages containing native text. @@ -48976,7 +49382,9 @@ export namespace gkehub { export interface FeatureMembershipConfigmanagement { /** + * (Optional, Deprecated) * Binauthz configuration for the cluster. Structure is documented below. + * This field will be ignored and should not be set. */ binauthz?: pulumi.Input; /** @@ -48985,6 +49393,10 @@ export namespace gkehub { configSync?: pulumi.Input; /** * Hierarchy Controller configuration for the cluster. Structure is documented below. + * Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + * Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + * Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + * to migrate from Hierarchy Controller to HNC. */ hierarchyController?: pulumi.Input; /** @@ -48993,6 +49405,8 @@ export namespace gkehub { management?: pulumi.Input; /** * Policy Controller configuration for the cluster. Structure is documented below. + * Configuring Policy Controller through the configmanagement feature is no longer recommended. + * Use the policycontroller feature instead. */ policyController?: pulumi.Input; /** @@ -52192,9 +52606,55 @@ export namespace iam { export interface WorkloadIdentityPoolProviderSaml { /** * SAML Identity provider configuration metadata xml doc. 
+ * + * The `x509` block supports: */ idpMetadataXml: pulumi.Input; } + + export interface WorkloadIdentityPoolProviderX509 { + /** + * A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. + */ + trustStore: pulumi.Input; + } + + export interface WorkloadIdentityPoolProviderX509TrustStore { + /** + * Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + * Structure is documented below. + */ + intermediateCas?: pulumi.Input[]>; + /** + * List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * Structure is documented below. + */ + trustAnchors: pulumi.Input[]>; + } + + export interface WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + /** + * PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + */ + pemCertificate?: pulumi.Input; + } + + export interface WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + /** + * PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + */ + pemCertificate?: pulumi.Input; + } } export namespace iap { @@ -56820,6 +57280,10 @@ export namespace networkconnectivity { * IP ranges encompassing the subnets to be excluded from peering. */ excludeExportRanges?: pulumi.Input[]>; + /** + * IP ranges allowed to be included from peering. + */ + includeExportRanges?: pulumi.Input[]>; /** * The URI of the VPC network resource. */ @@ -62002,6 +62466,10 @@ export namespace pubsub { * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ maxDuration?: pulumi.Input; + /** + * The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + */ + maxMessages?: pulumi.Input; /** * The service account to use to write to Cloud Storage. If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), @@ -62016,6 +62484,10 @@ export namespace pubsub { } export interface SubscriptionCloudStorageConfigAvroConfig { + /** + * When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + */ + useTopicSchema?: pulumi.Input; /** * When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. */ @@ -62323,6 +62795,105 @@ export namespace redis { network?: pulumi.Input; } + export interface ClusterMaintenancePolicy { + /** + * (Output) + * Output only. The time when the policy was created. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + createTime?: pulumi.Input; + /** + * (Output) + * Output only. The time when the policy was last updated. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + updateTime?: pulumi.Input; + /** + * Optional. Maintenance window that is applied to resources covered by this policy. + * Minimum 1. 
For the current version, the maximum number + * of weeklyWindow is expected to be one. + * Structure is documented below. + */ + weeklyMaintenanceWindows?: pulumi.Input[]>; + } + + export interface ClusterMaintenancePolicyWeeklyMaintenanceWindow { + /** + * Required. The day of week that maintenance updates occur. + * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + * - MONDAY: Monday + * - TUESDAY: Tuesday + * - WEDNESDAY: Wednesday + * - THURSDAY: Thursday + * - FRIDAY: Friday + * - SATURDAY: Saturday + * - SUNDAY: Sunday + * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + */ + day: pulumi.Input; + /** + * (Output) + * Output only. Duration of the maintenance window. + * The current window is fixed at 1 hour. + * A duration in seconds with up to nine fractional digits, + * terminated by 's'. Example: "3.5s". + */ + duration?: pulumi.Input; + /** + * Required. Start time of the window in UTC time. + * Structure is documented below. + */ + startTime: pulumi.Input; + } + + export interface ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime { + /** + * Hours of day in 24 hour format. Should be from 0 to 23. + * An API may choose to allow the value "24:00:00" for scenarios like business closing time. + */ + hours?: pulumi.Input; + /** + * Minutes of hour of day. Must be from 0 to 59. + */ + minutes?: pulumi.Input; + /** + * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + */ + nanos?: pulumi.Input; + /** + * Seconds of minutes of the time. Must normally be from 0 to 59. + * An API may allow the value 60 if it allows leap-seconds. + */ + seconds?: pulumi.Input; + } + + export interface ClusterMaintenanceSchedule { + /** + * (Output) + * Output only. The end time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + endTime?: pulumi.Input; + /** + * (Output) + * Output only. The deadline that the maintenance schedule start time + * can not go beyond, including reschedule. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + scheduleDeadlineTime?: pulumi.Input; + /** + * (Output) + * Output only. The start time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + startTime?: pulumi.Input; + } + export interface ClusterPscConfig { /** * Required. The consumer network where the network address of diff --git a/sdk/nodejs/types/output.ts b/sdk/nodejs/types/output.ts index 2202893e46..45dc5ba74e 100644 --- a/sdk/nodejs/types/output.ts +++ b/sdk/nodejs/types/output.ts @@ -2306,6 +2306,25 @@ export namespace alloydb { primaryClusterName: string; } + export interface ClusterTrialMetadata { + /** + * End time of the trial cluster. + */ + endTime?: string; + /** + * Grace end time of the trial cluster. + */ + graceEndTime?: string; + /** + * Start time of the trial cluster. + */ + startTime?: string; + /** + * Upgrade time of the trial cluster to standard cluster. + */ + upgradeTime?: string; + } + export interface GetLocationsLocation { /** * The friendly name for this location, typically a nearby city name. For example, "Tokyo". 
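The next hunk documents the new `encryptionConfiguration` block on `gcp.bigquery.DataTransferConfig`, which lets a transfer's BigQuery data be protected with a customer-managed KMS key. A minimal sketch of how that block might be consumed from the Node SDK is shown below; the resource names, dataset, schedule, query, and KMS key path are illustrative placeholders rather than values taken from this patch.

```typescript
import * as gcp from "@pulumi/gcp";

// Destination dataset for a scheduled-query transfer (placeholder names).
const dataset = new gcp.bigquery.Dataset("reporting", {
    datasetId: "reporting",
    location: "US",
});

const nightlyQuery = new gcp.bigquery.DataTransferConfig("nightly-query", {
    displayName: "nightly-query",
    dataSourceId: "scheduled_query",
    location: "US",
    destinationDatasetId: dataset.datasetId,
    schedule: "every 24 hours",
    params: {
        query: "SELECT CURRENT_TIMESTAMP() AS run_at",
        destination_table_name_template: "nightly_runs",
        write_disposition: "WRITE_APPEND",
    },
    // New in this upgrade: CMEK for the data written by this transfer.
    // The key is referenced by its full resource name; it is not created here.
    encryptionConfiguration: {
        kmsKeyName: "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key",
    },
});

export const transferName = nightlyQuery.name;
```

As with other CMEK fields in the provider, the caller is expected to grant the BigQuery Data Transfer service agent access to the referenced key separately.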
@@ -5747,6 +5766,13 @@ export namespace bigquery { enableFailureEmail: boolean; } + export interface DataTransferConfigEncryptionConfiguration { + /** + * The name of the KMS key used for encrypting BigQuery data. + */ + kmsKeyName: string; + } + export interface DataTransferConfigScheduleOptions { /** * If true, automatic scheduling of data transfer runs for this @@ -7335,13 +7361,42 @@ export namespace bigqueryanalyticshub { title: string; } + export interface DataExchangeSharingEnvironmentConfig { + /** + * Data Clean Room (DCR), used for privacy-safe and secured data sharing. + */ + dcrExchangeConfig?: outputs.bigqueryanalyticshub.DataExchangeSharingEnvironmentConfigDcrExchangeConfig; + /** + * Default Analytics Hub data exchange, used for secured data sharing. + */ + defaultExchangeConfig?: outputs.bigqueryanalyticshub.DataExchangeSharingEnvironmentConfigDefaultExchangeConfig; + } + + export interface DataExchangeSharingEnvironmentConfigDcrExchangeConfig { + } + + export interface DataExchangeSharingEnvironmentConfigDefaultExchangeConfig { + } + export interface ListingBigqueryDataset { /** * Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 + */ + dataset: string; + /** + * Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + * Structure is documented below. + */ + selectedResources?: outputs.bigqueryanalyticshub.ListingBigqueryDatasetSelectedResource[]; + } + + export interface ListingBigqueryDatasetSelectedResource { + /** + * Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" * * - - - */ - dataset: string; + table?: string; } export interface ListingDataProvider { @@ -7383,6 +7438,11 @@ export namespace bigqueryanalyticshub { * If true, enable restricted export. */ enabled?: boolean; + /** + * (Output) + * If true, restrict direct table access(read api/tabledata.list) on linked table. + */ + restrictDirectTableAccess: boolean; /** * If true, restrict export of query result derived from restricted linked dataset table. */ @@ -7566,6 +7626,10 @@ export namespace bigtable { * The name of the column family. */ family: string; + /** + * The type of the column family. + */ + type?: string; } export interface TableIamBindingCondition { @@ -10513,6 +10577,129 @@ export namespace certificatemanager { ports: number[]; } + export interface GetCertificatesCertificate { + /** + * A human-readable description of the resource. + */ + description: string; + effectiveLabels: {[key: string]: string}; + /** + * Set of label tags associated with the Certificate resource. + * + * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + * Please refer to the field 'effective_labels' for all of the labels present on the resource. + */ + labels: {[key: string]: string}; + /** + * The Certificate Manager location. If not specified, "global" is used. + */ + location: string; + /** + * Configuration and state of a Managed Certificate. + * Certificate Manager provisions and renews Managed Certificates + * automatically, for as long as it's authorized to do so. + */ + manageds: outputs.certificatemanager.GetCertificatesCertificateManaged[]; + /** + * A user-defined name of the certificate. 
Certificate names must be unique + * The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, + * and all following characters must be a dash, underscore, letter or digit. + */ + name: string; + /** + * The ID of the project in which the resource belongs. If it + * is not provided, the provider project is used. + */ + project: string; + /** + * The combination of labels configured directly on the resource + * and default labels configured on the provider. + */ + pulumiLabels: {[key: string]: string}; + /** + * The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + */ + sanDnsnames: string[]; + /** + * The scope of the certificate. + * + * DEFAULT: Certificates with default scope are served from core Google data centers. + * If unsure, choose this option. + * + * EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. + * See https://cloud.google.com/vpc/docs/edge-locations. + * + * ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). + * See https://cloud.google.com/compute/docs/regions-zones + */ + scope: string; + } + + export interface GetCertificatesCertificateManaged { + /** + * Detailed state of the latest authorization attempt for each domain + * specified for this Managed Certificate. + */ + authorizationAttemptInfos: outputs.certificatemanager.GetCertificatesCertificateManagedAuthorizationAttemptInfo[]; + /** + * Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + */ + dnsAuthorizations: string[]; + /** + * The domains for which a managed SSL certificate will be generated. + * Wildcard domains are only supported with DNS challenge resolution + */ + domains: string[]; + /** + * The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. + * If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. + * Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + */ + issuanceConfig: string; + /** + * Information about issues with provisioning this Managed Certificate. + */ + provisioningIssues: outputs.certificatemanager.GetCertificatesCertificateManagedProvisioningIssue[]; + /** + * A state of this Managed Certificate. + */ + state: string; + } + + export interface GetCertificatesCertificateManagedAuthorizationAttemptInfo { + /** + * Human readable explanation for reaching the state. Provided to help + * address the configuration issues. + * Not guaranteed to be stable. For programmatic access use 'failure_reason' field. + */ + details: string; + /** + * Domain name of the authorization attempt. + */ + domain: string; + /** + * Reason for failure of the authorization attempt for the domain. + */ + failureReason: string; + /** + * State of the domain for managed certificate issuance. + */ + state: string; + } + + export interface GetCertificatesCertificateManagedProvisioningIssue { + /** + * Human readable explanation about the issue. Provided to help address + * the configuration issues. + * Not guaranteed to be stable. For programmatic access use 'reason' field. 
+ */ + details: string; + /** + * Reason for provisioning failures. + */ + reason: string; + } + export interface TrustConfigAllowlistedCertificate { /** * PEM certificate that is allowlisted. The certificate can be up to 5k bytes, and must be a parseable X.509 certificate. @@ -12508,11 +12695,11 @@ export namespace cloudbuild { export interface WorkerPoolWorkerConfig { /** - * Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + * Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. */ diskSizeGb?: number; /** - * Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + * Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. */ machineType?: string; /** @@ -15814,8 +16001,7 @@ export namespace cloudrun { name: string; /** * A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" */ nfs: outputs.cloudrun.GetServiceTemplateSpecVolumeNf[]; /** @@ -15830,8 +16016,7 @@ export namespace cloudrun { /** * Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" */ driver: string; /** @@ -16672,8 +16857,7 @@ export namespace cloudrun { name: string; /** * A filesystem backed by a Network File System share. This filesystem requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" * Structure is documented below. */ nfs?: outputs.cloudrun.ServiceTemplateSpecVolumeNfs; @@ -16690,8 +16874,7 @@ export namespace cloudrun { /** * Unique name representing the type of file system to be created. Cloud Run supports the following values: * * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - * run.googleapis.com/execution-environment annotation to be set to "gen2" and - * run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + * run.googleapis.com/execution-environment annotation to be unset or set to "gen2" */ driver: string; /** @@ -17073,7 +17256,7 @@ export namespace cloudrunv2 { */ emptyDirs: outputs.cloudrunv2.GetJobTemplateTemplateVolumeEmptyDir[]; /** - * Cloud Storage bucket mounted as a volume using GCSFuse. 
This feature requires the launch stage to be set to ALPHA or BETA. + * Cloud Storage bucket mounted as a volume using GCSFuse. */ gcs: outputs.cloudrunv2.GetJobTemplateTemplateVolumeGc[]; /** @@ -17081,7 +17264,7 @@ export namespace cloudrunv2 { */ name: string; /** - * NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * NFS share mounted as a volume. */ nfs: outputs.cloudrunv2.GetJobTemplateTemplateVolumeNf[]; /** @@ -17341,6 +17524,10 @@ export namespace cloudrunv2 { * Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. */ serviceAccount: string; + /** + * Enables Cloud Service Mesh for this Revision. + */ + serviceMeshes: outputs.cloudrunv2.GetServiceTemplateServiceMesh[]; /** * Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity */ @@ -17657,6 +17844,13 @@ export namespace cloudrunv2 { minInstanceCount: number; } + export interface GetServiceTemplateServiceMesh { + /** + * The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + */ + mesh: string; + } + export interface GetServiceTemplateVolume { /** * For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. @@ -17667,7 +17861,7 @@ export namespace cloudrunv2 { */ emptyDirs: outputs.cloudrunv2.GetServiceTemplateVolumeEmptyDir[]; /** - * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. */ gcs: outputs.cloudrunv2.GetServiceTemplateVolumeGc[]; /** @@ -18150,7 +18344,7 @@ export namespace cloudrunv2 { */ emptyDir?: outputs.cloudrunv2.JobTemplateTemplateVolumeEmptyDir; /** - * Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + * Cloud Storage bucket mounted as a volume using GCSFuse. * Structure is documented below. */ gcs?: outputs.cloudrunv2.JobTemplateTemplateVolumeGcs; @@ -18159,7 +18353,7 @@ export namespace cloudrunv2 { */ name: string; /** - * NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + * NFS share mounted as a volume. * Structure is documented below. */ nfs?: outputs.cloudrunv2.JobTemplateTemplateVolumeNfs; @@ -18454,6 +18648,11 @@ export namespace cloudrunv2 { * Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. */ serviceAccount: string; + /** + * Enables Cloud Service Mesh for this Revision. + * Structure is documented below. + */ + serviceMesh?: outputs.cloudrunv2.ServiceTemplateServiceMesh; /** * Enables session affinity. 
For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity */ @@ -18785,6 +18984,15 @@ export namespace cloudrunv2 { minInstanceCount?: number; } + export interface ServiceTemplateServiceMesh { + /** + * The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + * + * - - - + */ + mesh?: string; + } + export interface ServiceTemplateVolume { /** * For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. @@ -18797,7 +19005,7 @@ export namespace cloudrunv2 { */ emptyDir?: outputs.cloudrunv2.ServiceTemplateVolumeEmptyDir; /** - * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + * Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. * Structure is documented below. */ gcs?: outputs.cloudrunv2.ServiceTemplateVolumeGcs; @@ -18855,8 +19063,6 @@ export namespace cloudrunv2 { path: string; /** * If true, mount the NFS volume as read only - * - * - - - */ readOnly?: boolean; /** @@ -19218,6 +19424,151 @@ export namespace cloudtasks { version?: string; } + export interface QueueHttpTarget { + /** + * HTTP target headers. + * This map contains the header field names and values. + * Headers will be set when running the CreateTask and/or BufferTask. + * These headers represent a subset of the headers that will be configured for the task's HTTP request. + * Some HTTP request headers will be ignored or replaced. + * Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + * The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + * Structure is documented below. + */ + headerOverrides?: outputs.cloudtasks.QueueHttpTargetHeaderOverride[]; + /** + * The HTTP method to use for the request. + * When specified, it overrides HttpRequest for the task. + * Note that if the value is set to GET the body of the task will be ignored at execution time. + * Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + */ + httpMethod: string; + /** + * If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + * This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + * Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + * Structure is documented below. + */ + oauthToken?: outputs.cloudtasks.QueueHttpTargetOauthToken; + /** + * If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + * This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + * Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + * Structure is documented below. + */ + oidcToken?: outputs.cloudtasks.QueueHttpTargetOidcToken; + /** + * URI override. + * When specified, overrides the execution URI for all the tasks in the queue. 
+ * Structure is documented below. + */ + uriOverride?: outputs.cloudtasks.QueueHttpTargetUriOverride; + } + + export interface QueueHttpTargetHeaderOverride { + /** + * Header embodying a key and a value. + * Structure is documented below. + */ + header: outputs.cloudtasks.QueueHttpTargetHeaderOverrideHeader; + } + + export interface QueueHttpTargetHeaderOverrideHeader { + /** + * The Key of the header. + */ + key: string; + /** + * The Value of the header. + */ + value: string; + } + + export interface QueueHttpTargetOauthToken { + /** + * OAuth scope to be used for generating OAuth access token. + * If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + */ + scope: string; + /** + * Service account email to be used for generating OAuth token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + */ + serviceAccountEmail: string; + } + + export interface QueueHttpTargetOidcToken { + /** + * Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + */ + audience: string; + /** + * Service account email to be used for generating OIDC token. + * The service account must be within the same project as the queue. + * The caller must have iam.serviceAccounts.actAs permission for the service account. + */ + serviceAccountEmail: string; + } + + export interface QueueHttpTargetUriOverride { + /** + * Host override. + * When specified, replaces the host part of the task URL. + * For example, if the task URL is "https://www.google.com", and host value + * is set to "example.net", the overridden URI will be changed to "https://example.net". + * Host value cannot be an empty string (INVALID_ARGUMENT). + */ + host?: string; + /** + * URI path. + * When specified, replaces the existing path of the task URL. + * Setting the path value to an empty string clears the URI path segment. + * Structure is documented below. + */ + pathOverride?: outputs.cloudtasks.QueueHttpTargetUriOverridePathOverride; + /** + * Port override. + * When specified, replaces the port part of the task URI. + * For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + * Note that the port value must be a positive integer. + * Setting the port to 0 (Zero) clears the URI port. + */ + port?: string; + /** + * URI query. + * When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + * Structure is documented below. + */ + queryOverride?: outputs.cloudtasks.QueueHttpTargetUriOverrideQueryOverride; + /** + * Scheme override. + * When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + * Possible values are: `HTTP`, `HTTPS`. + */ + scheme: string; + /** + * URI Override Enforce Mode + * When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + * Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + */ + uriOverrideEnforceMode: string; + } + + export interface QueueHttpTargetUriOverridePathOverride { + /** + * The URI path (e.g., /users/1234). Default is an empty string. + */ + path: string; + } + + export interface QueueHttpTargetUriOverrideQueryOverride { + /** + * The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. 
+ */ + queryParams: string; + } + export interface QueueIamBindingCondition { description?: string; expression: string; @@ -21400,7 +21751,7 @@ export namespace compute { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. */ ports?: string[]; @@ -21419,7 +21770,7 @@ export namespace compute { * is only applicable for UDP or TCP protocol. Each entry must be * either an integer or a range. If not specified, this rule * applies to connections through any port. - * Example inputs include: ["22"], ["80","443"], and + * Example inputs include: [22], [80, 443], and * ["12345-12349"]. */ ports?: string[]; @@ -23081,6 +23432,10 @@ export namespace compute { * Structure is documented below. */ initializeParams: outputs.compute.GetInstanceBootDiskInitializeParam[]; + /** + * The disk interface used for attaching this disk. One of `SCSI` or `NVME`. + */ + interface: string; /** * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. */ @@ -26467,6 +26822,10 @@ export namespace compute { * Structure is documented below. */ initializeParams: outputs.compute.InstanceBootDiskInitializeParams; + /** + * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + */ + interface?: string; /** * The selfLink of the encryption key that is * stored in Google Cloud KMS to encrypt this disk. Only one of `kmsKeySelfLink` @@ -26625,6 +26984,10 @@ export namespace compute { * Parameters with which a disk was created alongside the instance. */ initializeParams: outputs.compute.InstanceFromMachineImageBootDiskInitializeParams; + /** + * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + */ + interface: string; /** * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. */ @@ -27059,6 +27422,10 @@ export namespace compute { * Parameters with which a disk was created alongside the instance. */ initializeParams: outputs.compute.InstanceFromTemplateBootDiskInitializeParams; + /** + * The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attachedDisk and only used for specific cases, please don't specify this field without advice from Google.) + */ + interface: string; /** * The selfLink of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kmsKeySelfLink and diskEncryptionKeyRaw may be set. */ @@ -27797,7 +28164,7 @@ export namespace compute { subnetwork: string; /** * The project in which the subnetwork belongs. - * If the `subnetwork` is a self_link, this field is ignored in favor of the project + * If the `subnetwork` is a self_link, this field is set to the project * defined in the subnetwork self_link. If the `subnetwork` is a name and this * field is not provided, the provider project is used. 
*/ @@ -28977,6 +29344,19 @@ export namespace compute { projectId: string; } + export interface NodeTemplateAccelerator { + /** + * The number of the guest accelerator cards exposed to this + * node template. + */ + acceleratorCount?: number; + /** + * Full or partial URL of the accelerator type resource to expose + * to this node template. + */ + acceleratorType?: string; + } + export interface NodeTemplateNodeTypeFlexibility { /** * Number of virtual CPUs to use. @@ -39321,6 +39701,10 @@ export namespace container { * One of `"none"` or `"static"`. Defaults to `none` when `kubeletConfig` is unset. */ cpuManagerPolicy: string; + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled: string; /** * Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. */ @@ -39547,9 +39931,14 @@ export namespace container { export interface ClusterNodePoolAutoConfig { /** - * The network tag config for the cluster's automatically provisioned node pools. + * The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. */ networkTags?: outputs.container.ClusterNodePoolAutoConfigNetworkTags; + /** + * Kubelet configuration for Autopilot clusters. Currently, only `insecureKubeletReadonlyPortEnabled` is supported here. + * Structure is documented below. + */ + nodeKubeletConfig?: outputs.container.ClusterNodePoolAutoConfigNodeKubeletConfig; /** * A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. */ @@ -39563,6 +39952,13 @@ export namespace container { tags?: string[]; } + export interface ClusterNodePoolAutoConfigNodeKubeletConfig { + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled: string; + } + export interface ClusterNodePoolAutoscaling { /** * Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs. @@ -39602,6 +39998,10 @@ export namespace container { * The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below. */ gcfsConfig?: outputs.container.ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig; + /** + * Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ */ + insecureKubeletReadonlyPortEnabled: string; /** * The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. */ @@ -40151,6 +40551,10 @@ export namespace container { * One of `"none"` or `"static"`. Defaults to `none` when `kubeletConfig` is unset. */ cpuManagerPolicy: string; + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled: string; /** * Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. */ @@ -41521,6 +41925,10 @@ export namespace container { * Control the CPU management policy on the node. */ cpuManagerPolicy: string; + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled: string; /** * Controls the maximum number of processes allowed to run in a pod. */ @@ -41702,6 +42110,10 @@ export namespace container { * Collection of Compute Engine network tags that can be applied to a node's underlying VM instance. */ networkTags: outputs.container.GetClusterNodePoolAutoConfigNetworkTag[]; + /** + * Node kubelet configs. + */ + nodeKubeletConfigs: outputs.container.GetClusterNodePoolAutoConfigNodeKubeletConfig[]; /** * A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. */ @@ -41715,6 +42127,13 @@ export namespace container { tags: string[]; } + export interface GetClusterNodePoolAutoConfigNodeKubeletConfig { + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled: string; + } + export interface GetClusterNodePoolAutoscaling { /** * Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs. @@ -41754,6 +42173,10 @@ export namespace container { * GCFS configuration for this node. */ gcfsConfigs: outputs.container.GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfig[]; + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled: string; /** * Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. */ @@ -42214,6 +42637,10 @@ export namespace container { * Control the CPU management policy on the node. */ cpuManagerPolicy: string; + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled: string; /** * Controls the maximum number of processes allowed to run in a pod. 
*/ @@ -43036,6 +43463,10 @@ export namespace container { * Control the CPU management policy on the node. */ cpuManagerPolicy: string; + /** + * Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + */ + insecureKubeletReadonlyPortEnabled: string; /** * Controls the maximum number of processes allowed to run in a pod. */ @@ -43582,33 +44013,33 @@ export namespace databasemigrationservice { */ cloudSqlId?: string; /** - * Required. The IP or hostname of the source MySQL database. + * The IP or hostname of the source MySQL database. */ - host: string; + host?: string; /** - * Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. */ - password: string; + password?: string; /** * (Output) * Output only. Indicates If this connection profile password is stored. */ passwordSet: boolean; /** - * Required. The network port of the source MySQL database. + * The network port of the source MySQL database. */ - port: number; + port?: number; /** * SSL configuration for the destination to connect to the source database. * Structure is documented below. */ ssl?: outputs.databasemigrationservice.ConnectionProfileMysqlSsl; /** - * Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. */ - username: string; + username?: string; } export interface ConnectionProfileMysqlSsl { @@ -43749,43 +44180,47 @@ export namespace databasemigrationservice { } export interface ConnectionProfilePostgresql { + /** + * If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. + */ + alloydbClusterId?: string; /** * If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. */ cloudSqlId?: string; /** - * Required. The IP or hostname of the source MySQL database. + * The IP or hostname of the source MySQL database. */ - host: string; + host?: string; /** * (Output) * Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. */ networkArchitecture: string; /** - * Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + * Input only. The password for the user that Database Migration Service will be using to connect to the database. * This field is not returned on request, and the value is encrypted when stored in Database Migration Service. * **Note**: This property is sensitive and will not be displayed in the plan. */ - password: string; + password?: string; /** * (Output) * Output only. Indicates If this connection profile password is stored. */ passwordSet: boolean; /** - * Required. The network port of the source MySQL database. + * The network port of the source MySQL database. */ - port: number; + port?: number; /** * SSL configuration for the destination to connect to the source database. 
* Structure is documented below. */ ssl?: outputs.databasemigrationservice.ConnectionProfilePostgresqlSsl; /** - * Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + * The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. */ - username: string; + username?: string; } export interface ConnectionProfilePostgresqlSsl { @@ -47956,6 +48391,11 @@ export namespace dataloss { * Structure is documented below. */ pubSubNotification?: outputs.dataloss.PreventionDiscoveryConfigActionPubSubNotification; + /** + * Publish a message into the Pub/Sub topic. + * Structure is documented below. + */ + tagResources?: outputs.dataloss.PreventionDiscoveryConfigActionTagResources; } export interface PreventionDiscoveryConfigActionExportData { @@ -48037,6 +48477,51 @@ export namespace dataloss { minimumSensitivityScore?: string; } + export interface PreventionDiscoveryConfigActionTagResources { + /** + * Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + */ + lowerDataRiskToLow?: boolean; + /** + * The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + * Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + */ + profileGenerationsToTags?: string[]; + /** + * The tags to associate with different conditions. + * Structure is documented below. + */ + tagConditions?: outputs.dataloss.PreventionDiscoveryConfigActionTagResourcesTagCondition[]; + } + + export interface PreventionDiscoveryConfigActionTagResourcesTagCondition { + /** + * Conditions attaching the tag to a resource on its profile having this sensitivity score. + * Structure is documented below. + */ + sensitivityScore?: outputs.dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore; + /** + * The tag value to attach to resources. + * Structure is documented below. + */ + tag?: outputs.dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionTag; + } + + export interface PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore { + /** + * The sensitivity score applied to the resource. + * Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + */ + score: string; + } + + export interface PreventionDiscoveryConfigActionTagResourcesTagConditionTag { + /** + * The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". 
+ */ + namespacedValue?: string; + } + export interface PreventionDiscoveryConfigError { /** * A list of messages that carry the error details. @@ -48131,6 +48616,11 @@ export namespace dataloss { } export interface PreventionDiscoveryConfigTargetBigQueryTargetCadence { + /** + * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + */ + inspectTemplateModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence; /** * Governs when to update data profiles when a schema is modified * Structure is documented below. @@ -48143,6 +48633,14 @@ export namespace dataloss { tableModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadence; } + export interface PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence { + /** + * How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + */ + frequency?: string; + } + export interface PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence { /** * Frequency to regenerate data profiles when the schema is modified. Defaults to monthly. @@ -48388,6 +48886,11 @@ export namespace dataloss { } export interface PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence { + /** + * Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + * Structure is documented below. + */ + inspectTemplateModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence; /** * Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. @@ -48400,6 +48903,14 @@ export namespace dataloss { schemaModifiedCadence?: outputs.dataloss.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence; } + export interface PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence { + /** + * How frequently data profiles can be updated when the template is modified. Defaults to never. + * Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + */ + frequency: string; + } + export interface PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence { /** * Frequency to regenerate data profiles when the schema is modified. Defaults to monthly. @@ -53596,7 +54107,7 @@ export namespace dataproc { */ softwareConfig?: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigSoftwareConfig; /** - * A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + * A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). */ stagingBucket?: string; /** @@ -53642,7 +54153,7 @@ export namespace dataproc { */ internalIpOnly: boolean; /** - * The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + * The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). */ metadata?: {[key: string]: string}; /** @@ -53678,7 +54189,7 @@ export namespace dataproc { */ subnetwork?: string; /** - * The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + * The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). */ tags?: string[]; /** @@ -53748,26 +54259,26 @@ export namespace dataproc { */ executableFile?: string; /** - * Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + * Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. */ executionTimeout?: string; } export interface WorkflowTemplatePlacementManagedClusterConfigLifecycleConfig { /** - * The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). */ autoDeleteTime?: string; /** - * The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. 
Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). */ autoDeleteTtl?: string; /** - * The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + * The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). */ idleDeleteTtl?: string; /** - * Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + * Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). */ idleStartTime: string; } @@ -53802,7 +54313,7 @@ export namespace dataproc { */ managedGroupConfigs: outputs.dataproc.WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfig[]; /** - * Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + * Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */ minCpuPlatform: string; /** @@ -55328,6 +55839,10 @@ export namespace datastream { } export interface StreamSourceConfigSqlServerSourceConfig { + /** + * CDC reader reads from change tables. + */ + changeTables?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigChangeTables; /** * SQL Server objects to exclude from the stream. * Structure is documented below. @@ -55346,6 +55861,13 @@ export namespace datastream { * Max concurrent CDC tasks. */ maxConcurrentCdcTasks: number; + /** + * CDC reader reads from transaction logs. + */ + transactionLogs?: outputs.datastream.StreamSourceConfigSqlServerSourceConfigTransactionLogs; + } + + export interface StreamSourceConfigSqlServerSourceConfigChangeTables { } export interface StreamSourceConfigSqlServerSourceConfigExcludeObjects { @@ -55496,6 +56018,9 @@ export namespace datastream { scale: number; } + export interface StreamSourceConfigSqlServerSourceConfigTransactionLogs { + } + } export namespace deploymentmanager { @@ -57740,6 +58265,11 @@ export namespace discoveryengine { } export interface DataStoreDocumentProcessingConfig { + /** + * Whether chunking mode is enabled. + * Structure is documented below. + */ + chunkingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigChunkingConfig; /** * Configurations for default Document parser. 
If not specified, this resource * will be configured to use a default DigitalParsingConfig, and the default parsing @@ -57759,11 +58289,36 @@ export namespace discoveryengine { parsingConfigOverrides?: outputs.discoveryengine.DataStoreDocumentProcessingConfigParsingConfigOverride[]; } + export interface DataStoreDocumentProcessingConfigChunkingConfig { + /** + * Configuration for the layout based chunking. + * Structure is documented below. + */ + layoutBasedChunkingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig; + } + + export interface DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig { + /** + * The token size limit for each chunk. + * Supported values: 100-500 (inclusive). Default value: 500. + */ + chunkSize?: number; + /** + * Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + * Default value: False. + */ + includeAncestorHeadings?: boolean; + } + export interface DataStoreDocumentProcessingConfigDefaultParsingConfig { /** * Configurations applied to digital parser. */ digitalParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig; + /** + * Configurations applied to layout parser. + */ + layoutParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig; /** * Configurations applied to OCR parser. Currently it only applies to PDFs. * Structure is documented below. @@ -57774,6 +58329,9 @@ export namespace discoveryengine { export interface DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig { } + export interface DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig { + } + export interface DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig { /** * If true, will use native text instead of OCR text on pages containing native text. @@ -57790,6 +58348,10 @@ export namespace discoveryengine { * The identifier for this object. Format specified above. */ fileType: string; + /** + * Configurations applied to layout parser. + */ + layoutParsingConfig?: outputs.discoveryengine.DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig; /** * Configurations applied to OCR parser. Currently it only applies to PDFs. * Structure is documented below. @@ -57800,6 +58362,9 @@ export namespace discoveryengine { export interface DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig { } + export interface DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig { + } + export interface DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig { /** * If true, will use native text instead of OCR text on pages containing native text. @@ -61138,7 +61703,9 @@ export namespace gkehub { export interface FeatureMembershipConfigmanagement { /** + * (Optional, Deprecated) * Binauthz configuration for the cluster. Structure is documented below. + * This field will be ignored and should not be set. */ binauthz: outputs.gkehub.FeatureMembershipConfigmanagementBinauthz; /** @@ -61147,6 +61714,10 @@ export namespace gkehub { configSync?: outputs.gkehub.FeatureMembershipConfigmanagementConfigSync; /** * Hierarchy Controller configuration for the cluster. Structure is documented below. + * Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. 
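The `chunkingConfig` and layout-parser fields introduced above can be combined on a data store; a hedged sketch follows. Only `document_processing_config` mirrors the `DataStoreDocumentProcessingConfig*` types in this diff; the ids, vertical, and content settings are placeholders assumed from the existing `discoveryengine.DataStore` resource:

```python
import pulumi
import pulumi_gcp as gcp

# Placeholder ids/settings; document_processing_config follows the new
# chunking_config / layout_parsing_config types added in this release.
data_store = gcp.discoveryengine.DataStore("chunked-store",
    location="global",
    data_store_id="chunked-store-id",
    display_name="chunked-store",
    industry_vertical="GENERIC",
    content_config="CONTENT_REQUIRED",
    solution_types=["SOLUTION_TYPE_SEARCH"],
    document_processing_config={
        "chunking_config": {
            "layout_based_chunking_config": {
                "chunk_size": 500,                  # token limit per chunk, 100-500
                "include_ancestor_headings": True,  # keep heading context in chunks
            },
        },
        "default_parsing_config": {
            "layout_parsing_config": {},            # empty message selects the layout parser
        },
    })
```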
+ * Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + * Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + * to migrate from Hierarchy Controller to HNC. */ hierarchyController?: outputs.gkehub.FeatureMembershipConfigmanagementHierarchyController; /** @@ -61155,6 +61726,8 @@ export namespace gkehub { management: string; /** * Policy Controller configuration for the cluster. Structure is documented below. + * Configuring Policy Controller through the configmanagement feature is no longer recommended. + * Use the policycontroller feature instead. */ policyController?: outputs.gkehub.FeatureMembershipConfigmanagementPolicyController; /** @@ -64203,6 +64776,48 @@ export namespace iam { idpMetadataXml: string; } + export interface GetWorkloadIdentityPoolProviderX509 { + /** + * A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. + */ + trustStores: outputs.iam.GetWorkloadIdentityPoolProviderX509TrustStore[]; + } + + export interface GetWorkloadIdentityPoolProviderX509TrustStore { + /** + * Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + */ + intermediateCas: outputs.iam.GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa[]; + /** + * List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + */ + trustAnchors: outputs.iam.GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor[]; + } + + export interface GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + /** + * PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + */ + pemCertificate: string; + } + + export interface GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + /** + * PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + */ + pemCertificate: string; + } + export interface WorkforcePoolAccessRestrictions { /** * Services allowed for web sign-in with the workforce pool. @@ -64449,10 +65064,56 @@ export namespace iam { export interface WorkloadIdentityPoolProviderSaml { /** * SAML Identity provider configuration metadata xml doc. + * + * The `x509` block supports: */ idpMetadataXml: string; } + export interface WorkloadIdentityPoolProviderX509 { + /** + * A Trust store, use this trust store as a wrapper to config the trust + * anchor and optional intermediate cas to help build the trust chain for + * the incoming end entity certificate. Follow the x509 guidelines to + * define those PEM encoded certs. Only 1 trust store is currently + * supported. + */ + trustStore: outputs.iam.WorkloadIdentityPoolProviderX509TrustStore; + } + + export interface WorkloadIdentityPoolProviderX509TrustStore { + /** + * Set of intermediate CA certificates used for building the trust chain to + * trust anchor. + * IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. 
+ * Structure is documented below. + */ + intermediateCas?: outputs.iam.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa[]; + /** + * List of Trust Anchors to be used while performing validation + * against a given TrustStore. The incoming end entity's certificate + * must be chained up to one of the trust anchors here. + * Structure is documented below. + */ + trustAnchors: outputs.iam.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor[]; + } + + export interface WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa { + /** + * PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + */ + pemCertificate?: string; + } + + export interface WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor { + /** + * PEM certificate of the PKI used for validation. Must only contain one + * ca certificate(either root or intermediate cert). + */ + pemCertificate?: string; + } + } export namespace iap { @@ -66137,6 +66798,57 @@ export namespace kms { subjectAlternativeDnsNames: string[]; } + export interface GetCryptoKeyLatestVersionPublicKey { + /** + * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + */ + algorithm: string; + /** + * The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + */ + pem: string; + } + + export interface GetCryptoKeyVersionsPublicKey { + /** + * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + */ + algorithm: string; + /** + * The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + */ + pem: string; + } + + export interface GetCryptoKeyVersionsVersion { + /** + * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + */ + algorithm: string; + /** + * The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + * `gcp.kms.CryptoKey` resource/datasource. + */ + cryptoKey: string; + id: string; + name: string; + protectionLevel: string; + publicKeys: outputs.kms.GetCryptoKeyVersionsVersionPublicKey[]; + state: string; + version: number; + } + + export interface GetCryptoKeyVersionsVersionPublicKey { + /** + * The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + */ + algorithm: string; + /** + * The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + */ + pem: string; + } + export interface GetCryptoKeysKey { /** * The resource name of the backend environment associated with all CryptoKeyVersions within this CryptoKey. @@ -69347,6 +70059,10 @@ export namespace networkconnectivity { * IP ranges encompassing the subnets to be excluded from peering. */ excludeExportRanges?: string[]; + /** + * IP ranges allowed to be included from peering. + */ + includeExportRanges?: string[]; /** * The URI of the VPC network resource. */ @@ -74532,6 +75248,10 @@ export namespace pubsub { * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ maxDuration: string; + /** + * The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + */ + maxMessages: number; /** * The service account to use to write to Cloud Storage. 
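The new `x509` federation block on `iam.WorkloadIdentityPoolProvider`, documented above, wraps a single trust store of PEM trust anchors plus optional intermediate CAs. A sketch under stated assumptions: the pool and provider ids, the attribute mapping, the certificate paths, and the local `read_file` helper are all placeholders, and only the `x509` shape is taken from the `WorkloadIdentityPoolProviderX509TrustStore` types in this diff:

```python
import pulumi
import pulumi_gcp as gcp


def read_file(path: str) -> str:
    # Placeholder helper for loading PEM material from disk.
    with open(path) as f:
        return f.read()


pool = gcp.iam.WorkloadIdentityPool("x509-pool",
    workload_identity_pool_id="x509-pool")

provider = gcp.iam.WorkloadIdentityPoolProvider("x509-provider",
    workload_identity_pool_id=pool.workload_identity_pool_id,
    workload_identity_pool_provider_id="x509-provider",
    attribute_mapping={
        "google.subject": "assertion.subject.dn.cn",  # assumed mapping
    },
    x509={
        "trust_store": {
            # The incoming end-entity certificate must chain up to one of these anchors.
            "trust_anchors": [{
                "pem_certificate": read_file("root-ca.pem"),
            }],
            # Optional intermediates that complete the chain to the anchor.
            "intermediate_cas": [{
                "pem_certificate": read_file("intermediate-ca.pem"),
            }],
        },
    })
```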
If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), @@ -74545,6 +75265,10 @@ export namespace pubsub { } export interface GetSubscriptionCloudStorageConfigAvroConfig { + /** + * When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + */ + useTopicSchema: boolean; /** * When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. */ @@ -74872,6 +75596,10 @@ export namespace pubsub { * A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". */ maxDuration?: string; + /** + * The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + */ + maxMessages?: number; /** * The service account to use to write to Cloud Storage. If not specified, the Pub/Sub * [service agent](https://cloud.google.com/iam/docs/service-agents), @@ -74886,6 +75614,10 @@ export namespace pubsub { } export interface SubscriptionCloudStorageConfigAvroConfig { + /** + * When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + */ + useTopicSchema?: boolean; /** * When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. */ @@ -75195,6 +75927,105 @@ export namespace redis { network?: string; } + export interface ClusterMaintenancePolicy { + /** + * (Output) + * Output only. The time when the policy was created. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + createTime: string; + /** + * (Output) + * Output only. The time when the policy was last updated. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + updateTime: string; + /** + * Optional. Maintenance window that is applied to resources covered by this policy. + * Minimum 1. For the current version, the maximum number + * of weeklyWindow is expected to be one. + * Structure is documented below. + */ + weeklyMaintenanceWindows?: outputs.redis.ClusterMaintenancePolicyWeeklyMaintenanceWindow[]; + } + + export interface ClusterMaintenancePolicyWeeklyMaintenanceWindow { + /** + * Required. The day of week that maintenance updates occur. + * - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + * - MONDAY: Monday + * - TUESDAY: Tuesday + * - WEDNESDAY: Wednesday + * - THURSDAY: Thursday + * - FRIDAY: Friday + * - SATURDAY: Saturday + * - SUNDAY: Sunday + * Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + */ + day: string; + /** + * (Output) + * Output only. Duration of the maintenance window. + * The current window is fixed at 1 hour. + * A duration in seconds with up to nine fractional digits, + * terminated by 's'. Example: "3.5s". + */ + duration: string; + /** + * Required. Start time of the window in UTC time. + * Structure is documented below. + */ + startTime: outputs.redis.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime; + } + + export interface ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime { + /** + * Hours of day in 24 hour format. Should be from 0 to 23. + * An API may choose to allow the value "24:00:00" for scenarios like business closing time. + */ + hours?: number; + /** + * Minutes of hour of day. Must be from 0 to 59. 
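For the new `maxMessages` and Avro `useTopicSchema` options on the subscription's Cloud Storage delivery, a minimal sketch; the topic and bucket are placeholders, the other `cloud_storage_config` fields come from the existing schema, and only the two commented fields are new in this diff:

```python
import pulumi
import pulumi_gcp as gcp

topic = gcp.pubsub.Topic("export-topic")
bucket = gcp.storage.Bucket("export-bucket", location="US")

subscription = gcp.pubsub.Subscription("export-subscription",
    topic=topic.id,
    cloud_storage_config={
        "bucket": bucket.name,
        "max_duration": "300s",
        # New: roll to a new Cloud Storage object after this many messages (min 1000).
        "max_messages": 1000,
        "avro_config": {
            "write_metadata": True,
            # New: serialize output files with the topic schema, when one exists.
            "use_topic_schema": True,
        },
    })
```

As the service-account note above says, the Pub/Sub service agent (or the configured service account) still needs write access on the bucket for delivery to succeed.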
+ */ + minutes?: number; + /** + * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + */ + nanos?: number; + /** + * Seconds of minutes of the time. Must normally be from 0 to 59. + * An API may allow the value 60 if it allows leap-seconds. + */ + seconds?: number; + } + + export interface ClusterMaintenanceSchedule { + /** + * (Output) + * Output only. The end time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + endTime: string; + /** + * (Output) + * Output only. The deadline that the maintenance schedule start time + * can not go beyond, including reschedule. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + scheduleDeadlineTime: string; + /** + * (Output) + * Output only. The start time of any upcoming scheduled maintenance for this cluster. + * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + * resolution and up to nine fractional digits. + */ + startTime: string; + } + export interface ClusterPscConfig { /** * Required. The consumer network where the network address of diff --git a/sdk/python/pulumi_gcp/__init__.py b/sdk/python/pulumi_gcp/__init__.py index 3ce028eec9..f2892825f4 100644 --- a/sdk/python/pulumi_gcp/__init__.py +++ b/sdk/python/pulumi_gcp/__init__.py @@ -1031,6 +1031,14 @@ "gcp:assuredworkloads/workload:Workload": "Workload" } }, + { + "pkg": "gcp", + "mod": "backupdisasterrecovery/backupVault", + "fqn": "pulumi_gcp.backupdisasterrecovery", + "classes": { + "gcp:backupdisasterrecovery/backupVault:BackupVault": "BackupVault" + } + }, { "pkg": "gcp", "mod": "backupdisasterrecovery/managementServer", @@ -7159,6 +7167,14 @@ "gcp:securitycenter/v2FolderNotificationConfig:V2FolderNotificationConfig": "V2FolderNotificationConfig" } }, + { + "pkg": "gcp", + "mod": "securitycenter/v2FolderSccBigQueryExport", + "fqn": "pulumi_gcp.securitycenter", + "classes": { + "gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport": "V2FolderSccBigQueryExport" + } + }, { "pkg": "gcp", "mod": "securitycenter/v2OrganizationMuteConfig", @@ -7231,6 +7247,14 @@ "gcp:securitycenter/v2ProjectNotificationConfig:V2ProjectNotificationConfig": "V2ProjectNotificationConfig" } }, + { + "pkg": "gcp", + "mod": "securitycenter/v2ProjectSccBigQueryExport", + "fqn": "pulumi_gcp.securitycenter", + "classes": { + "gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport": "V2ProjectSccBigQueryExport" + } + }, { "pkg": "gcp", "mod": "securityposture/posture", diff --git a/sdk/python/pulumi_gcp/alloydb/_inputs.py b/sdk/python/pulumi_gcp/alloydb/_inputs.py index 70f579da0c..ae3afff1fc 100644 --- a/sdk/python/pulumi_gcp/alloydb/_inputs.py +++ b/sdk/python/pulumi_gcp/alloydb/_inputs.py @@ -67,6 +67,8 @@ 'ClusterRestoreContinuousBackupSourceArgsDict', 'ClusterSecondaryConfigArgs', 'ClusterSecondaryConfigArgsDict', + 'ClusterTrialMetadataArgs', + 'ClusterTrialMetadataArgsDict', 'InstanceClientConnectionConfigArgs', 'InstanceClientConnectionConfigArgsDict', 'InstanceClientConnectionConfigSslConfigArgs', @@ -1585,6 +1587,98 @@ def primary_cluster_name(self, value: pulumi.Input[str]): pulumi.set(self, "primary_cluster_name", value) +if not MYPY: + class ClusterTrialMetadataArgsDict(TypedDict): + end_time: NotRequired[pulumi.Input[str]] + """ + End time of the trial cluster. 
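Putting the `maintenancePolicy` fields above together, a hedged `redis.Cluster` sketch: only `maintenance_policy` follows the `ClusterMaintenancePolicy` types in this diff, while `shard_count`, `psc_configs`, and the network path are assumptions/placeholders from the existing cluster schema (`maintenance_schedule` is output only and not set here):

```python
import pulumi
import pulumi_gcp as gcp

cluster = gcp.redis.Cluster("cache",
    region="us-central1",
    shard_count=3,
    psc_configs=[{
        # Placeholder consumer network for Private Service Connect.
        "network": "projects/my-project/global/networks/my-network",
    }],
    maintenance_policy={
        # At most one weekly window is currently expected.
        "weekly_maintenance_windows": [{
            "day": "MONDAY",
            "start_time": {
                "hours": 1,     # 01:00 UTC
                "minutes": 0,
            },
        }],
    })
```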
+ """ + grace_end_time: NotRequired[pulumi.Input[str]] + """ + Grace end time of the trial cluster. + """ + start_time: NotRequired[pulumi.Input[str]] + """ + Start time of the trial cluster. + """ + upgrade_time: NotRequired[pulumi.Input[str]] + """ + Upgrade time of the trial cluster to standard cluster. + """ +elif False: + ClusterTrialMetadataArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class ClusterTrialMetadataArgs: + def __init__(__self__, *, + end_time: Optional[pulumi.Input[str]] = None, + grace_end_time: Optional[pulumi.Input[str]] = None, + start_time: Optional[pulumi.Input[str]] = None, + upgrade_time: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] end_time: End time of the trial cluster. + :param pulumi.Input[str] grace_end_time: Grace end time of the trial cluster. + :param pulumi.Input[str] start_time: Start time of the trial cluster. + :param pulumi.Input[str] upgrade_time: Upgrade time of the trial cluster to standard cluster. + """ + if end_time is not None: + pulumi.set(__self__, "end_time", end_time) + if grace_end_time is not None: + pulumi.set(__self__, "grace_end_time", grace_end_time) + if start_time is not None: + pulumi.set(__self__, "start_time", start_time) + if upgrade_time is not None: + pulumi.set(__self__, "upgrade_time", upgrade_time) + + @property + @pulumi.getter(name="endTime") + def end_time(self) -> Optional[pulumi.Input[str]]: + """ + End time of the trial cluster. + """ + return pulumi.get(self, "end_time") + + @end_time.setter + def end_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "end_time", value) + + @property + @pulumi.getter(name="graceEndTime") + def grace_end_time(self) -> Optional[pulumi.Input[str]]: + """ + Grace end time of the trial cluster. + """ + return pulumi.get(self, "grace_end_time") + + @grace_end_time.setter + def grace_end_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "grace_end_time", value) + + @property + @pulumi.getter(name="startTime") + def start_time(self) -> Optional[pulumi.Input[str]]: + """ + Start time of the trial cluster. + """ + return pulumi.get(self, "start_time") + + @start_time.setter + def start_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "start_time", value) + + @property + @pulumi.getter(name="upgradeTime") + def upgrade_time(self) -> Optional[pulumi.Input[str]]: + """ + Upgrade time of the trial cluster to standard cluster. 
+ """ + return pulumi.get(self, "upgrade_time") + + @upgrade_time.setter + def upgrade_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "upgrade_time", value) + + if not MYPY: class InstanceClientConnectionConfigArgsDict(TypedDict): require_connectors: NotRequired[pulumi.Input[bool]] diff --git a/sdk/python/pulumi_gcp/alloydb/cluster.py b/sdk/python/pulumi_gcp/alloydb/cluster.py index 3c76fab16e..44a68b0e05 100644 --- a/sdk/python/pulumi_gcp/alloydb/cluster.py +++ b/sdk/python/pulumi_gcp/alloydb/cluster.py @@ -40,7 +40,8 @@ def __init__(__self__, *, psc_config: Optional[pulumi.Input['ClusterPscConfigArgs']] = None, restore_backup_source: Optional[pulumi.Input['ClusterRestoreBackupSourceArgs']] = None, restore_continuous_backup_source: Optional[pulumi.Input['ClusterRestoreContinuousBackupSourceArgs']] = None, - secondary_config: Optional[pulumi.Input['ClusterSecondaryConfigArgs']] = None): + secondary_config: Optional[pulumi.Input['ClusterSecondaryConfigArgs']] = None, + subscription_type: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Cluster resource. :param pulumi.Input[str] cluster_id: The ID of the alloydb cluster. @@ -65,6 +66,7 @@ def __init__(__self__, *, :param pulumi.Input[str] deletion_policy: Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE :param pulumi.Input[str] display_name: User-settable and human-readable display name for the Cluster. :param pulumi.Input['ClusterEncryptionConfigArgs'] encryption_config: EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). Structure is documented below. @@ -88,6 +90,8 @@ def __init__(__self__, *, Structure is documented below. :param pulumi.Input['ClusterSecondaryConfigArgs'] secondary_config: Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY. Structure is documented below. + :param pulumi.Input[str] subscription_type: The subscrition type of cluster. + Possible values are: `TRIAL`, `STANDARD`. """ pulumi.set(__self__, "cluster_id", cluster_id) pulumi.set(__self__, "location", location) @@ -127,6 +131,8 @@ def __init__(__self__, *, pulumi.set(__self__, "restore_continuous_backup_source", restore_continuous_backup_source) if secondary_config is not None: pulumi.set(__self__, "secondary_config", secondary_config) + if subscription_type is not None: + pulumi.set(__self__, "subscription_type", subscription_type) @property @pulumi.getter(name="clusterId") @@ -231,6 +237,7 @@ def deletion_policy(self) -> Optional[pulumi.Input[str]]: Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. 
This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE """ return pulumi.get(self, "deletion_policy") @@ -393,6 +400,19 @@ def secondary_config(self) -> Optional[pulumi.Input['ClusterSecondaryConfigArgs' def secondary_config(self, value: Optional[pulumi.Input['ClusterSecondaryConfigArgs']]): pulumi.set(self, "secondary_config", value) + @property + @pulumi.getter(name="subscriptionType") + def subscription_type(self) -> Optional[pulumi.Input[str]]: + """ + The subscrition type of cluster. + Possible values are: `TRIAL`, `STANDARD`. + """ + return pulumi.get(self, "subscription_type") + + @subscription_type.setter + def subscription_type(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "subscription_type", value) + @pulumi.input_type class _ClusterState: @@ -427,6 +447,8 @@ def __init__(__self__, *, restore_continuous_backup_source: Optional[pulumi.Input['ClusterRestoreContinuousBackupSourceArgs']] = None, secondary_config: Optional[pulumi.Input['ClusterSecondaryConfigArgs']] = None, state: Optional[pulumi.Input[str]] = None, + subscription_type: Optional[pulumi.Input[str]] = None, + trial_metadatas: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterTrialMetadataArgs']]]] = None, uid: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Cluster resources. @@ -452,6 +474,7 @@ def __init__(__self__, *, :param pulumi.Input[str] deletion_policy: Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE :param pulumi.Input[str] display_name: User-settable and human-readable display name for the Cluster. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. :param pulumi.Input['ClusterEncryptionConfigArgs'] encryption_config: EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). @@ -492,6 +515,10 @@ def __init__(__self__, *, :param pulumi.Input['ClusterSecondaryConfigArgs'] secondary_config: Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY. Structure is documented below. :param pulumi.Input[str] state: Output only. The current serving state of the cluster. + :param pulumi.Input[str] subscription_type: The subscrition type of cluster. + Possible values are: `TRIAL`, `STANDARD`. + :param pulumi.Input[Sequence[pulumi.Input['ClusterTrialMetadataArgs']]] trial_metadatas: Contains information and all metadata related to TRIAL clusters. + Structure is documented below. :param pulumi.Input[str] uid: The system-generated UID of the resource. 
""" if annotations is not None: @@ -554,6 +581,10 @@ def __init__(__self__, *, pulumi.set(__self__, "secondary_config", secondary_config) if state is not None: pulumi.set(__self__, "state", state) + if subscription_type is not None: + pulumi.set(__self__, "subscription_type", subscription_type) + if trial_metadatas is not None: + pulumi.set(__self__, "trial_metadatas", trial_metadatas) if uid is not None: pulumi.set(__self__, "uid", uid) @@ -671,6 +702,7 @@ def deletion_policy(self) -> Optional[pulumi.Input[str]]: Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE """ return pulumi.get(self, "deletion_policy") @@ -947,6 +979,32 @@ def state(self) -> Optional[pulumi.Input[str]]: def state(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "state", value) + @property + @pulumi.getter(name="subscriptionType") + def subscription_type(self) -> Optional[pulumi.Input[str]]: + """ + The subscrition type of cluster. + Possible values are: `TRIAL`, `STANDARD`. + """ + return pulumi.get(self, "subscription_type") + + @subscription_type.setter + def subscription_type(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "subscription_type", value) + + @property + @pulumi.getter(name="trialMetadatas") + def trial_metadatas(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterTrialMetadataArgs']]]]: + """ + Contains information and all metadata related to TRIAL clusters. + Structure is documented below. + """ + return pulumi.get(self, "trial_metadatas") + + @trial_metadatas.setter + def trial_metadatas(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterTrialMetadataArgs']]]]): + pulumi.set(self, "trial_metadatas", value) + @property @pulumi.getter def uid(self) -> Optional[pulumi.Input[str]]: @@ -985,6 +1043,7 @@ def __init__(__self__, restore_backup_source: Optional[pulumi.Input[Union['ClusterRestoreBackupSourceArgs', 'ClusterRestoreBackupSourceArgsDict']]] = None, restore_continuous_backup_source: Optional[pulumi.Input[Union['ClusterRestoreContinuousBackupSourceArgs', 'ClusterRestoreContinuousBackupSourceArgsDict']]] = None, secondary_config: Optional[pulumi.Input[Union['ClusterSecondaryConfigArgs', 'ClusterSecondaryConfigArgsDict']]] = None, + subscription_type: Optional[pulumi.Input[str]] = None, __props__=None): """ ## Example Usage @@ -1149,6 +1208,7 @@ def __init__(__self__, :param pulumi.Input[str] deletion_policy: Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE :param pulumi.Input[str] display_name: User-settable and human-readable display name for the Cluster. 
:param pulumi.Input[Union['ClusterEncryptionConfigArgs', 'ClusterEncryptionConfigArgsDict']] encryption_config: EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). Structure is documented below. @@ -1176,6 +1236,8 @@ def __init__(__self__, Structure is documented below. :param pulumi.Input[Union['ClusterSecondaryConfigArgs', 'ClusterSecondaryConfigArgsDict']] secondary_config: Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY. Structure is documented below. + :param pulumi.Input[str] subscription_type: The subscrition type of cluster. + Possible values are: `TRIAL`, `STANDARD`. """ ... @overload @@ -1361,6 +1423,7 @@ def _internal_init(__self__, restore_backup_source: Optional[pulumi.Input[Union['ClusterRestoreBackupSourceArgs', 'ClusterRestoreBackupSourceArgsDict']]] = None, restore_continuous_backup_source: Optional[pulumi.Input[Union['ClusterRestoreContinuousBackupSourceArgs', 'ClusterRestoreContinuousBackupSourceArgsDict']]] = None, secondary_config: Optional[pulumi.Input[Union['ClusterSecondaryConfigArgs', 'ClusterSecondaryConfigArgsDict']]] = None, + subscription_type: Optional[pulumi.Input[str]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -1394,6 +1457,7 @@ def _internal_init(__self__, __props__.__dict__["restore_backup_source"] = restore_backup_source __props__.__dict__["restore_continuous_backup_source"] = restore_continuous_backup_source __props__.__dict__["secondary_config"] = secondary_config + __props__.__dict__["subscription_type"] = subscription_type __props__.__dict__["backup_sources"] = None __props__.__dict__["continuous_backup_infos"] = None __props__.__dict__["effective_annotations"] = None @@ -1404,6 +1468,7 @@ def _internal_init(__self__, __props__.__dict__["pulumi_labels"] = None __props__.__dict__["reconciling"] = None __props__.__dict__["state"] = None + __props__.__dict__["trial_metadatas"] = None __props__.__dict__["uid"] = None secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["effectiveLabels", "pulumiLabels"]) opts = pulumi.ResourceOptions.merge(opts, secret_opts) @@ -1447,6 +1512,8 @@ def get(resource_name: str, restore_continuous_backup_source: Optional[pulumi.Input[Union['ClusterRestoreContinuousBackupSourceArgs', 'ClusterRestoreContinuousBackupSourceArgsDict']]] = None, secondary_config: Optional[pulumi.Input[Union['ClusterSecondaryConfigArgs', 'ClusterSecondaryConfigArgsDict']]] = None, state: Optional[pulumi.Input[str]] = None, + subscription_type: Optional[pulumi.Input[str]] = None, + trial_metadatas: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterTrialMetadataArgs', 'ClusterTrialMetadataArgsDict']]]]] = None, uid: Optional[pulumi.Input[str]] = None) -> 'Cluster': """ Get an existing Cluster resource's state with the given name, id, and optional extra @@ -1477,6 +1544,7 @@ def get(resource_name: str, :param pulumi.Input[str] deletion_policy: Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. 
This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE :param pulumi.Input[str] display_name: User-settable and human-readable display name for the Cluster. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. :param pulumi.Input[Union['ClusterEncryptionConfigArgs', 'ClusterEncryptionConfigArgsDict']] encryption_config: EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). @@ -1517,6 +1585,10 @@ def get(resource_name: str, :param pulumi.Input[Union['ClusterSecondaryConfigArgs', 'ClusterSecondaryConfigArgsDict']] secondary_config: Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY. Structure is documented below. :param pulumi.Input[str] state: Output only. The current serving state of the cluster. + :param pulumi.Input[str] subscription_type: The subscrition type of cluster. + Possible values are: `TRIAL`, `STANDARD`. + :param pulumi.Input[Sequence[pulumi.Input[Union['ClusterTrialMetadataArgs', 'ClusterTrialMetadataArgsDict']]]] trial_metadatas: Contains information and all metadata related to TRIAL clusters. + Structure is documented below. :param pulumi.Input[str] uid: The system-generated UID of the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -1553,6 +1625,8 @@ def get(resource_name: str, __props__.__dict__["restore_continuous_backup_source"] = restore_continuous_backup_source __props__.__dict__["secondary_config"] = secondary_config __props__.__dict__["state"] = state + __props__.__dict__["subscription_type"] = subscription_type + __props__.__dict__["trial_metadatas"] = trial_metadatas __props__.__dict__["uid"] = uid return Cluster(resource_name, opts=opts, __props__=__props__) @@ -1638,6 +1712,7 @@ def deletion_policy(self) -> pulumi.Output[Optional[str]]: Policy to determine if the cluster should be deleted forcefully. Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + Possible values: DEFAULT, FORCE """ return pulumi.get(self, "deletion_policy") @@ -1826,6 +1901,24 @@ def state(self) -> pulumi.Output[str]: """ return pulumi.get(self, "state") + @property + @pulumi.getter(name="subscriptionType") + def subscription_type(self) -> pulumi.Output[str]: + """ + The subscrition type of cluster. + Possible values are: `TRIAL`, `STANDARD`. + """ + return pulumi.get(self, "subscription_type") + + @property + @pulumi.getter(name="trialMetadatas") + def trial_metadatas(self) -> pulumi.Output[Sequence['outputs.ClusterTrialMetadata']]: + """ + Contains information and all metadata related to TRIAL clusters. + Structure is documented below. 
+ """ + return pulumi.get(self, "trial_metadatas") + @property @pulumi.getter def uid(self) -> pulumi.Output[str]: diff --git a/sdk/python/pulumi_gcp/alloydb/outputs.py b/sdk/python/pulumi_gcp/alloydb/outputs.py index 36d44f75d2..1c3f059b31 100644 --- a/sdk/python/pulumi_gcp/alloydb/outputs.py +++ b/sdk/python/pulumi_gcp/alloydb/outputs.py @@ -42,6 +42,7 @@ 'ClusterRestoreBackupSource', 'ClusterRestoreContinuousBackupSource', 'ClusterSecondaryConfig', + 'ClusterTrialMetadata', 'InstanceClientConnectionConfig', 'InstanceClientConnectionConfigSslConfig', 'InstanceMachineConfig', @@ -1354,6 +1355,84 @@ def primary_cluster_name(self) -> str: return pulumi.get(self, "primary_cluster_name") +@pulumi.output_type +class ClusterTrialMetadata(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "endTime": + suggest = "end_time" + elif key == "graceEndTime": + suggest = "grace_end_time" + elif key == "startTime": + suggest = "start_time" + elif key == "upgradeTime": + suggest = "upgrade_time" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ClusterTrialMetadata. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ClusterTrialMetadata.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ClusterTrialMetadata.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + end_time: Optional[str] = None, + grace_end_time: Optional[str] = None, + start_time: Optional[str] = None, + upgrade_time: Optional[str] = None): + """ + :param str end_time: End time of the trial cluster. + :param str grace_end_time: Grace end time of the trial cluster. + :param str start_time: Start time of the trial cluster. + :param str upgrade_time: Upgrade time of the trial cluster to standard cluster. + """ + if end_time is not None: + pulumi.set(__self__, "end_time", end_time) + if grace_end_time is not None: + pulumi.set(__self__, "grace_end_time", grace_end_time) + if start_time is not None: + pulumi.set(__self__, "start_time", start_time) + if upgrade_time is not None: + pulumi.set(__self__, "upgrade_time", upgrade_time) + + @property + @pulumi.getter(name="endTime") + def end_time(self) -> Optional[str]: + """ + End time of the trial cluster. + """ + return pulumi.get(self, "end_time") + + @property + @pulumi.getter(name="graceEndTime") + def grace_end_time(self) -> Optional[str]: + """ + Grace end time of the trial cluster. + """ + return pulumi.get(self, "grace_end_time") + + @property + @pulumi.getter(name="startTime") + def start_time(self) -> Optional[str]: + """ + Start time of the trial cluster. + """ + return pulumi.get(self, "start_time") + + @property + @pulumi.getter(name="upgradeTime") + def upgrade_time(self) -> Optional[str]: + """ + Upgrade time of the trial cluster to standard cluster. 
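To exercise the `subscriptionType` input and the output-only `trialMetadatas` added in this change, a short sketch; `network_config` and the ids are placeholders assumed from the existing `alloydb.Cluster` schema:

```python
import pulumi
import pulumi_gcp as gcp

trial_cluster = gcp.alloydb.Cluster("trial",
    cluster_id="alloydb-trial-cluster",
    location="us-central1",
    subscription_type="TRIAL",  # or "STANDARD"
    network_config={
        # Placeholder VPC; shape assumed from the existing resource.
        "network": "projects/my-project/global/networks/my-network",
    })

# trial_metadatas is output only: end, grace-end, start, and upgrade times of the trial.
pulumi.export("trial_end_time",
              trial_cluster.trial_metadatas.apply(
                  lambda mds: mds[0].end_time if mds else None))
```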
+ """ + return pulumi.get(self, "upgrade_time") + + @pulumi.output_type class InstanceClientConnectionConfig(dict): @staticmethod diff --git a/sdk/python/pulumi_gcp/assuredworkloads/workload.py b/sdk/python/pulumi_gcp/assuredworkloads/workload.py index 27b7f253da..6d97f9904d 100644 --- a/sdk/python/pulumi_gcp/assuredworkloads/workload.py +++ b/sdk/python/pulumi_gcp/assuredworkloads/workload.py @@ -31,12 +31,13 @@ def __init__(__self__, *, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, partner: Optional[pulumi.Input[str]] = None, partner_permissions: Optional[pulumi.Input['WorkloadPartnerPermissionsArgs']] = None, + partner_services_billing_account: Optional[pulumi.Input[str]] = None, provisioned_resources_parent: Optional[pulumi.Input[str]] = None, resource_settings: Optional[pulumi.Input[Sequence[pulumi.Input['WorkloadResourceSettingArgs']]]] = None, violation_notifications_enabled: Optional[pulumi.Input[bool]] = None): """ The set of arguments for constructing a Workload resource. - :param pulumi.Input[str] compliance_regime: Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + :param pulumi.Input[str] compliance_regime: Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS :param pulumi.Input[str] display_name: Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload :param pulumi.Input[str] location: The location for the resource :param pulumi.Input[str] organization: The organization for the resource @@ -51,8 +52,9 @@ def __init__(__self__, *, **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field `effective_labels` for all of the labels present on the resource. - :param pulumi.Input[str] partner: Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + :param pulumi.Input[str] partner: Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM :param pulumi.Input['WorkloadPartnerPermissionsArgs'] partner_permissions: Optional. Permissions granted to the AW Partner SA account for the customer workload + :param pulumi.Input[str] partner_services_billing_account: Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. 
This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. :param pulumi.Input[str] provisioned_resources_parent: Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} :param pulumi.Input[Sequence[pulumi.Input['WorkloadResourceSettingArgs']]] resource_settings: Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional. :param pulumi.Input[bool] violation_notifications_enabled: Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. This will always be true while creating the workload. @@ -73,6 +75,8 @@ def __init__(__self__, *, pulumi.set(__self__, "partner", partner) if partner_permissions is not None: pulumi.set(__self__, "partner_permissions", partner_permissions) + if partner_services_billing_account is not None: + pulumi.set(__self__, "partner_services_billing_account", partner_services_billing_account) if provisioned_resources_parent is not None: pulumi.set(__self__, "provisioned_resources_parent", provisioned_resources_parent) if resource_settings is not None: @@ -84,7 +88,7 @@ def __init__(__self__, *, @pulumi.getter(name="complianceRegime") def compliance_regime(self) -> pulumi.Input[str]: """ - Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS """ return pulumi.get(self, "compliance_regime") @@ -187,7 +191,7 @@ def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) @pulumi.getter def partner(self) -> Optional[pulumi.Input[str]]: """ - Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + Optional. Partner regime associated with this workload. 
Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM """ return pulumi.get(self, "partner") @@ -207,6 +211,18 @@ def partner_permissions(self) -> Optional[pulumi.Input['WorkloadPartnerPermissio def partner_permissions(self, value: Optional[pulumi.Input['WorkloadPartnerPermissionsArgs']]): pulumi.set(self, "partner_permissions", value) + @property + @pulumi.getter(name="partnerServicesBillingAccount") + def partner_services_billing_account(self) -> Optional[pulumi.Input[str]]: + """ + Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. + """ + return pulumi.get(self, "partner_services_billing_account") + + @partner_services_billing_account.setter + def partner_services_billing_account(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "partner_services_billing_account", value) + @property @pulumi.getter(name="provisionedResourcesParent") def provisioned_resources_parent(self) -> Optional[pulumi.Input[str]]: @@ -264,6 +280,7 @@ def __init__(__self__, *, organization: Optional[pulumi.Input[str]] = None, partner: Optional[pulumi.Input[str]] = None, partner_permissions: Optional[pulumi.Input['WorkloadPartnerPermissionsArgs']] = None, + partner_services_billing_account: Optional[pulumi.Input[str]] = None, provisioned_resources_parent: Optional[pulumi.Input[str]] = None, pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, resource_settings: Optional[pulumi.Input[Sequence[pulumi.Input['WorkloadResourceSettingArgs']]]] = None, @@ -273,7 +290,7 @@ def __init__(__self__, *, """ Input properties used for looking up and filtering Workload resources. :param pulumi.Input[str] billing_account: Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. - :param pulumi.Input[str] compliance_regime: Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + :param pulumi.Input[str] compliance_regime: Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS :param pulumi.Input[Sequence[pulumi.Input['WorkloadComplianceStatusArgs']]] compliance_statuses: Output only. Count of active Violations in the Workload. :param pulumi.Input[Sequence[pulumi.Input[str]]] compliant_but_disallowed_services: Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke workloads.restrictAllowedResources endpoint to allow your project developers to use these services in their environment. :param pulumi.Input[str] create_time: Output only. Immutable. The Workload creation timestamp. @@ -294,8 +311,9 @@ def __init__(__self__, *, - - - - :param pulumi.Input[str] partner: Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + :param pulumi.Input[str] partner: Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM :param pulumi.Input['WorkloadPartnerPermissionsArgs'] partner_permissions: Optional. Permissions granted to the AW Partner SA account for the customer workload + :param pulumi.Input[str] partner_services_billing_account: Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. :param pulumi.Input[str] provisioned_resources_parent: Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource and default labels configured on the provider. :param pulumi.Input[Sequence[pulumi.Input['WorkloadResourceSettingArgs']]] resource_settings: Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional. 
@@ -337,6 +355,8 @@ def __init__(__self__, *, pulumi.set(__self__, "partner", partner) if partner_permissions is not None: pulumi.set(__self__, "partner_permissions", partner_permissions) + if partner_services_billing_account is not None: + pulumi.set(__self__, "partner_services_billing_account", partner_services_billing_account) if provisioned_resources_parent is not None: pulumi.set(__self__, "provisioned_resources_parent", provisioned_resources_parent) if pulumi_labels is not None: @@ -366,7 +386,7 @@ def billing_account(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="complianceRegime") def compliance_regime(self) -> Optional[pulumi.Input[str]]: """ - Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS """ return pulumi.get(self, "compliance_regime") @@ -541,7 +561,7 @@ def organization(self, value: Optional[pulumi.Input[str]]): @pulumi.getter def partner(self) -> Optional[pulumi.Input[str]]: """ - Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM """ return pulumi.get(self, "partner") @@ -561,6 +581,18 @@ def partner_permissions(self) -> Optional[pulumi.Input['WorkloadPartnerPermissio def partner_permissions(self, value: Optional[pulumi.Input['WorkloadPartnerPermissionsArgs']]): pulumi.set(self, "partner_permissions", value) + @property + @pulumi.getter(name="partnerServicesBillingAccount") + def partner_services_billing_account(self) -> Optional[pulumi.Input[str]]: + """ + Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. 
+ """ + return pulumi.get(self, "partner_services_billing_account") + + @partner_services_billing_account.setter + def partner_services_billing_account(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "partner_services_billing_account", value) + @property @pulumi.getter(name="provisionedResourcesParent") def provisioned_resources_parent(self) -> Optional[pulumi.Input[str]]: @@ -649,6 +681,7 @@ def __init__(__self__, organization: Optional[pulumi.Input[str]] = None, partner: Optional[pulumi.Input[str]] = None, partner_permissions: Optional[pulumi.Input[Union['WorkloadPartnerPermissionsArgs', 'WorkloadPartnerPermissionsArgsDict']]] = None, + partner_services_billing_account: Optional[pulumi.Input[str]] = None, provisioned_resources_parent: Optional[pulumi.Input[str]] = None, resource_settings: Optional[pulumi.Input[Sequence[pulumi.Input[Union['WorkloadResourceSettingArgs', 'WorkloadResourceSettingArgsDict']]]]] = None, violation_notifications_enabled: Optional[pulumi.Input[bool]] = None, @@ -677,7 +710,7 @@ def __init__(__self__, provisioned_resources_parent="folders/519620126891", resource_settings=[ { - "display_name": "folder-display-name", + "display_name": "{{name}}", "resource_type": "CONSUMER_FOLDER", }, { @@ -726,6 +759,42 @@ def __init__(__self__, "label-one": "value-one", }) ``` + ### Split_billing_partner_workload + A Split billing partner test of the assuredworkloads api + ```python + import pulumi + import pulumi_gcp as gcp + + primary = gcp.assuredworkloads.Workload("primary", + compliance_regime="ASSURED_WORKLOADS_FOR_PARTNERS", + display_name="display", + location="europe-west8", + organization="123456789", + billing_account="billingAccounts/000000-0000000-0000000-000000", + partner="SOVEREIGN_CONTROLS_BY_PSN", + partner_permissions={ + "assured_workloads_monitoring": True, + "data_logs_viewer": True, + "service_access_approver": True, + }, + partner_services_billing_account="billingAccounts/01BF3F-2C6DE5-30C607", + resource_settings=[ + { + "resource_type": "CONSUMER_FOLDER", + }, + { + "resource_type": "ENCRYPTION_KEYS_PROJECT", + }, + { + "resource_id": "ring", + "resource_type": "KEYRING", + }, + ], + violation_notifications_enabled=True, + labels={ + "label-one": "value-one", + }) + ``` ## Import @@ -748,7 +817,7 @@ def __init__(__self__, :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] billing_account: Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. - :param pulumi.Input[str] compliance_regime: Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + :param pulumi.Input[str] compliance_regime: Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS :param pulumi.Input[str] display_name: Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload :param pulumi.Input[bool] enable_sovereign_controls: Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers. :param pulumi.Input[Union['WorkloadKmsSettingsArgs', 'WorkloadKmsSettingsArgsDict']] kms_settings: **DEPRECATED** Input only. Settings used to create a CMEK crypto key. When set, a project with a KMS CMEK key is provisioned. This field is deprecated as of Feb 28, 2022. In order to create a Keyring, callers should specify, ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type field. @@ -762,8 +831,9 @@ def __init__(__self__, - - - - :param pulumi.Input[str] partner: Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + :param pulumi.Input[str] partner: Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM :param pulumi.Input[Union['WorkloadPartnerPermissionsArgs', 'WorkloadPartnerPermissionsArgsDict']] partner_permissions: Optional. Permissions granted to the AW Partner SA account for the customer workload + :param pulumi.Input[str] partner_services_billing_account: Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. :param pulumi.Input[str] provisioned_resources_parent: Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. Format: folders/{folder_id} :param pulumi.Input[Sequence[pulumi.Input[Union['WorkloadResourceSettingArgs', 'WorkloadResourceSettingArgsDict']]]] resource_settings: Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional. :param pulumi.Input[bool] violation_notifications_enabled: Optional. Indicates whether the e-mail notification for a violation is enabled for a workload. This value will be by default True, and if not present will be considered as true. This should only be updated via updateWorkload call. Any Changes to this field during the createWorkload call will not be honored. This will always be true while creating the workload. 
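The hunks above add the new `partner_services_billing_account` input and widen the `compliance_regime` and `partner` enums. As a minimal sketch of how the new field surfaces on an already provisioned workload (the organization, location and workload name in the ID below are placeholders, not values from this patch):

```python
import pulumi
import pulumi_gcp as gcp

# Look up an existing workload by its fully qualified ID; the organization,
# location and workload name here are placeholders, not values from this patch.
existing = gcp.assuredworkloads.Workload.get(
    "existing",
    id="organizations/123456789/locations/europe-west8/workloads/my-workload")

# The new input-only field is surfaced as an optional string output.
pulumi.export("partnerServicesBillingAccount", existing.partner_services_billing_account)
```

Since the generated docs mark the field as input only, the exported value reflects what was configured at creation rather than server-reported state.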
@@ -798,7 +868,7 @@ def __init__(__self__, provisioned_resources_parent="folders/519620126891", resource_settings=[ { - "display_name": "folder-display-name", + "display_name": "{{name}}", "resource_type": "CONSUMER_FOLDER", }, { @@ -847,6 +917,42 @@ def __init__(__self__, "label-one": "value-one", }) ``` + ### Split_billing_partner_workload + A Split billing partner test of the assuredworkloads api + ```python + import pulumi + import pulumi_gcp as gcp + + primary = gcp.assuredworkloads.Workload("primary", + compliance_regime="ASSURED_WORKLOADS_FOR_PARTNERS", + display_name="display", + location="europe-west8", + organization="123456789", + billing_account="billingAccounts/000000-0000000-0000000-000000", + partner="SOVEREIGN_CONTROLS_BY_PSN", + partner_permissions={ + "assured_workloads_monitoring": True, + "data_logs_viewer": True, + "service_access_approver": True, + }, + partner_services_billing_account="billingAccounts/01BF3F-2C6DE5-30C607", + resource_settings=[ + { + "resource_type": "CONSUMER_FOLDER", + }, + { + "resource_type": "ENCRYPTION_KEYS_PROJECT", + }, + { + "resource_id": "ring", + "resource_type": "KEYRING", + }, + ], + violation_notifications_enabled=True, + labels={ + "label-one": "value-one", + }) + ``` ## Import @@ -891,6 +997,7 @@ def _internal_init(__self__, organization: Optional[pulumi.Input[str]] = None, partner: Optional[pulumi.Input[str]] = None, partner_permissions: Optional[pulumi.Input[Union['WorkloadPartnerPermissionsArgs', 'WorkloadPartnerPermissionsArgsDict']]] = None, + partner_services_billing_account: Optional[pulumi.Input[str]] = None, provisioned_resources_parent: Optional[pulumi.Input[str]] = None, resource_settings: Optional[pulumi.Input[Sequence[pulumi.Input[Union['WorkloadResourceSettingArgs', 'WorkloadResourceSettingArgsDict']]]]] = None, violation_notifications_enabled: Optional[pulumi.Input[bool]] = None, @@ -921,6 +1028,7 @@ def _internal_init(__self__, __props__.__dict__["organization"] = organization __props__.__dict__["partner"] = partner __props__.__dict__["partner_permissions"] = partner_permissions + __props__.__dict__["partner_services_billing_account"] = partner_services_billing_account __props__.__dict__["provisioned_resources_parent"] = provisioned_resources_parent __props__.__dict__["resource_settings"] = resource_settings __props__.__dict__["violation_notifications_enabled"] = violation_notifications_enabled @@ -963,6 +1071,7 @@ def get(resource_name: str, organization: Optional[pulumi.Input[str]] = None, partner: Optional[pulumi.Input[str]] = None, partner_permissions: Optional[pulumi.Input[Union['WorkloadPartnerPermissionsArgs', 'WorkloadPartnerPermissionsArgsDict']]] = None, + partner_services_billing_account: Optional[pulumi.Input[str]] = None, provisioned_resources_parent: Optional[pulumi.Input[str]] = None, pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, resource_settings: Optional[pulumi.Input[Sequence[pulumi.Input[Union['WorkloadResourceSettingArgs', 'WorkloadResourceSettingArgsDict']]]]] = None, @@ -977,7 +1086,7 @@ def get(resource_name: str, :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] billing_account: Optional. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. 
After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF`. - :param pulumi.Input[str] compliance_regime: Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + :param pulumi.Input[str] compliance_regime: Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS :param pulumi.Input[Sequence[pulumi.Input[Union['WorkloadComplianceStatusArgs', 'WorkloadComplianceStatusArgsDict']]]] compliance_statuses: Output only. Count of active Violations in the Workload. :param pulumi.Input[Sequence[pulumi.Input[str]]] compliant_but_disallowed_services: Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke workloads.restrictAllowedResources endpoint to allow your project developers to use these services in their environment. :param pulumi.Input[str] create_time: Output only. Immutable. The Workload creation timestamp. @@ -998,8 +1107,9 @@ def get(resource_name: str, - - - - :param pulumi.Input[str] partner: Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + :param pulumi.Input[str] partner: Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM :param pulumi.Input[Union['WorkloadPartnerPermissionsArgs', 'WorkloadPartnerPermissionsArgsDict']] partner_permissions: Optional. Permissions granted to the AW Partner SA account for the customer workload + :param pulumi.Input[str] partner_services_billing_account: Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. :param pulumi.Input[str] provisioned_resources_parent: Input only. The parent resource for the resources managed by this Assured Workload. May be either empty or a folder resource which is a child of the Workload parent. If not specified all resources are created under the parent organization. 
Format: folders/{folder_id} :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource and default labels configured on the provider. :param pulumi.Input[Sequence[pulumi.Input[Union['WorkloadResourceSettingArgs', 'WorkloadResourceSettingArgsDict']]]] resource_settings: Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional. @@ -1028,6 +1138,7 @@ def get(resource_name: str, __props__.__dict__["organization"] = organization __props__.__dict__["partner"] = partner __props__.__dict__["partner_permissions"] = partner_permissions + __props__.__dict__["partner_services_billing_account"] = partner_services_billing_account __props__.__dict__["provisioned_resources_parent"] = provisioned_resources_parent __props__.__dict__["pulumi_labels"] = pulumi_labels __props__.__dict__["resource_settings"] = resource_settings @@ -1048,7 +1159,7 @@ def billing_account(self) -> pulumi.Output[Optional[str]]: @pulumi.getter(name="complianceRegime") def compliance_regime(self) -> pulumi.Output[str]: """ - Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT + Required. Immutable. Compliance Regime associated with this workload. Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS, HIPAA, HITRUST, EU_REGIONS_AND_SUPPORT, CA_REGIONS_AND_SUPPORT, ITAR, AU_REGIONS_AND_US_SUPPORT, ASSURED_WORKLOADS_FOR_PARTNERS, ISR_REGIONS, ISR_REGIONS_AND_SUPPORT, CA_PROTECTED_B, IL5, IL2, JP_REGIONS_AND_SUPPORT, KSA_REGIONS_AND_SUPPORT_WITH_SOVEREIGNTY_CONTROLS, REGIONAL_CONTROLS """ return pulumi.get(self, "compliance_regime") @@ -1167,7 +1278,7 @@ def organization(self) -> pulumi.Output[str]: @pulumi.getter def partner(self) -> pulumi.Output[Optional[str]]: """ - Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN + Optional. Partner regime associated with this workload. Possible values: PARTNER_UNSPECIFIED, LOCAL_CONTROLS_BY_S3NS, SOVEREIGN_CONTROLS_BY_T_SYSTEMS, SOVEREIGN_CONTROLS_BY_SIA_MINSAIT, SOVEREIGN_CONTROLS_BY_PSN, SOVEREIGN_CONTROLS_BY_CNTXT, SOVEREIGN_CONTROLS_BY_CNTXT_NO_EKM """ return pulumi.get(self, "partner") @@ -1179,6 +1290,14 @@ def partner_permissions(self) -> pulumi.Output[Optional['outputs.WorkloadPartner """ return pulumi.get(self, "partner_permissions") + @property + @pulumi.getter(name="partnerServicesBillingAccount") + def partner_services_billing_account(self) -> pulumi.Output[Optional[str]]: + """ + Optional. Input only. Billing account necessary for purchasing services from Sovereign Partners. This field is required for creating SIA/PSN/CNTXT partner workloads. The caller should have 'billing.resourceAssociations.create' IAM permission on this billing-account. The format of this string is billingAccounts/AAAAAA-BBBBBB-CCCCCC. 
+ """ + return pulumi.get(self, "partner_services_billing_account") + @property @pulumi.getter(name="provisionedResourcesParent") def provisioned_resources_parent(self) -> pulumi.Output[Optional[str]]: diff --git a/sdk/python/pulumi_gcp/backupdisasterrecovery/__init__.py b/sdk/python/pulumi_gcp/backupdisasterrecovery/__init__.py index b18952f3f5..5d97d6d4ef 100644 --- a/sdk/python/pulumi_gcp/backupdisasterrecovery/__init__.py +++ b/sdk/python/pulumi_gcp/backupdisasterrecovery/__init__.py @@ -5,6 +5,7 @@ from .. import _utilities import typing # Export this package's modules as members: +from .backup_vault import * from .get_management_server import * from .management_server import * from ._inputs import * diff --git a/sdk/python/pulumi_gcp/backupdisasterrecovery/backup_vault.py b/sdk/python/pulumi_gcp/backupdisasterrecovery/backup_vault.py new file mode 100644 index 0000000000..beb7064bf5 --- /dev/null +++ b/sdk/python/pulumi_gcp/backupdisasterrecovery/backup_vault.py @@ -0,0 +1,1203 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import sys +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +if sys.version_info >= (3, 11): + from typing import NotRequired, TypedDict, TypeAlias +else: + from typing_extensions import NotRequired, TypedDict, TypeAlias +from .. import _utilities + +__all__ = ['BackupVaultArgs', 'BackupVault'] + +@pulumi.input_type +class BackupVaultArgs: + def __init__(__self__, *, + backup_minimum_enforced_retention_duration: pulumi.Input[str], + backup_vault_id: pulumi.Input[str], + location: pulumi.Input[str], + allow_missing: Optional[pulumi.Input[bool]] = None, + annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + description: Optional[pulumi.Input[str]] = None, + effective_time: Optional[pulumi.Input[str]] = None, + force_delete: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + project: Optional[pulumi.Input[str]] = None): + """ + The set of arguments for constructing a BackupVault resource. + :param pulumi.Input[str] backup_minimum_enforced_retention_duration: Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + :param pulumi.Input[str] backup_vault_id: Required. ID of the requesting object. + + + - - - + :param pulumi.Input[str] location: The GCP location for the backup vault. + :param pulumi.Input[bool] allow_missing: Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Optional. User annotations. See https://google.aip.dev/128#annotations + Stores small amounts of arbitrary data. + **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field `effective_annotations` for all of the annotations present on the resource. + :param pulumi.Input[str] description: Optional. The description of the BackupVault instance (2048 characters or less). + :param pulumi.Input[str] effective_time: Optional. Time after which the BackupVault resource is locked. 
+ :param pulumi.Input[bool] force_delete: If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + :param pulumi.Input[bool] force_update: If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Resource labels to represent user provided metadata. + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field `effective_labels` for all of the labels present on the resource. + :param pulumi.Input[str] project: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + """ + pulumi.set(__self__, "backup_minimum_enforced_retention_duration", backup_minimum_enforced_retention_duration) + pulumi.set(__self__, "backup_vault_id", backup_vault_id) + pulumi.set(__self__, "location", location) + if allow_missing is not None: + pulumi.set(__self__, "allow_missing", allow_missing) + if annotations is not None: + pulumi.set(__self__, "annotations", annotations) + if description is not None: + pulumi.set(__self__, "description", description) + if effective_time is not None: + pulumi.set(__self__, "effective_time", effective_time) + if force_delete is not None: + pulumi.set(__self__, "force_delete", force_delete) + if force_update is not None: + pulumi.set(__self__, "force_update", force_update) + if labels is not None: + pulumi.set(__self__, "labels", labels) + if project is not None: + pulumi.set(__self__, "project", project) + + @property + @pulumi.getter(name="backupMinimumEnforcedRetentionDuration") + def backup_minimum_enforced_retention_duration(self) -> pulumi.Input[str]: + """ + Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + """ + return pulumi.get(self, "backup_minimum_enforced_retention_duration") + + @backup_minimum_enforced_retention_duration.setter + def backup_minimum_enforced_retention_duration(self, value: pulumi.Input[str]): + pulumi.set(self, "backup_minimum_enforced_retention_duration", value) + + @property + @pulumi.getter(name="backupVaultId") + def backup_vault_id(self) -> pulumi.Input[str]: + """ + Required. ID of the requesting object. + + + - - - + """ + return pulumi.get(self, "backup_vault_id") + + @backup_vault_id.setter + def backup_vault_id(self, value: pulumi.Input[str]): + pulumi.set(self, "backup_vault_id", value) + + @property + @pulumi.getter + def location(self) -> pulumi.Input[str]: + """ + The GCP location for the backup vault. + """ + return pulumi.get(self, "location") + + @location.setter + def location(self, value: pulumi.Input[str]): + pulumi.set(self, "location", value) + + @property + @pulumi.getter(name="allowMissing") + def allow_missing(self) -> Optional[pulumi.Input[bool]]: + """ + Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. 
+ """ + return pulumi.get(self, "allow_missing") + + @allow_missing.setter + def allow_missing(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "allow_missing", value) + + @property + @pulumi.getter + def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + Optional. User annotations. See https://google.aip.dev/128#annotations + Stores small amounts of arbitrary data. + **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field `effective_annotations` for all of the annotations present on the resource. + """ + return pulumi.get(self, "annotations") + + @annotations.setter + def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "annotations", value) + + @property + @pulumi.getter + def description(self) -> Optional[pulumi.Input[str]]: + """ + Optional. The description of the BackupVault instance (2048 characters or less). + """ + return pulumi.get(self, "description") + + @description.setter + def description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "description", value) + + @property + @pulumi.getter(name="effectiveTime") + def effective_time(self) -> Optional[pulumi.Input[str]]: + """ + Optional. Time after which the BackupVault resource is locked. + """ + return pulumi.get(self, "effective_time") + + @effective_time.setter + def effective_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "effective_time", value) + + @property + @pulumi.getter(name="forceDelete") + def force_delete(self) -> Optional[pulumi.Input[bool]]: + """ + If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + """ + return pulumi.get(self, "force_delete") + + @force_delete.setter + def force_delete(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "force_delete", value) + + @property + @pulumi.getter(name="forceUpdate") + def force_update(self) -> Optional[pulumi.Input[bool]]: + """ + If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + """ + return pulumi.get(self, "force_update") + + @force_update.setter + def force_update(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "force_update", value) + + @property + @pulumi.getter + def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + Optional. Resource labels to represent user provided metadata. + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field `effective_labels` for all of the labels present on the resource. + """ + return pulumi.get(self, "labels") + + @labels.setter + def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "labels", value) + + @property + @pulumi.getter + def project(self) -> Optional[pulumi.Input[str]]: + """ + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. 
+ """ + return pulumi.get(self, "project") + + @project.setter + def project(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "project", value) + + +@pulumi.input_type +class _BackupVaultState: + def __init__(__self__, *, + allow_missing: Optional[pulumi.Input[bool]] = None, + annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + backup_count: Optional[pulumi.Input[str]] = None, + backup_minimum_enforced_retention_duration: Optional[pulumi.Input[str]] = None, + backup_vault_id: Optional[pulumi.Input[str]] = None, + create_time: Optional[pulumi.Input[str]] = None, + deletable: Optional[pulumi.Input[bool]] = None, + description: Optional[pulumi.Input[str]] = None, + effective_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + effective_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + effective_time: Optional[pulumi.Input[str]] = None, + etag: Optional[pulumi.Input[str]] = None, + force_delete: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[str]] = None, + pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + service_account: Optional[pulumi.Input[str]] = None, + state: Optional[pulumi.Input[str]] = None, + total_stored_bytes: Optional[pulumi.Input[str]] = None, + uid: Optional[pulumi.Input[str]] = None, + update_time: Optional[pulumi.Input[str]] = None): + """ + Input properties used for looking up and filtering BackupVault resources. + :param pulumi.Input[bool] allow_missing: Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Optional. User annotations. See https://google.aip.dev/128#annotations + Stores small amounts of arbitrary data. + **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field `effective_annotations` for all of the annotations present on the resource. + :param pulumi.Input[str] backup_count: Output only. The number of backups in this backup vault. + :param pulumi.Input[str] backup_minimum_enforced_retention_duration: Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + :param pulumi.Input[str] backup_vault_id: Required. ID of the requesting object. + + + - - - + :param pulumi.Input[str] create_time: Output only. The time when the instance was created. + :param pulumi.Input[bool] deletable: Output only. Set to true when there are no backups nested under this resource. + :param pulumi.Input[str] description: Optional. The description of the BackupVault instance (2048 characters or less). + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + :param pulumi.Input[str] effective_time: Optional. Time after which the BackupVault resource is locked. + :param pulumi.Input[str] etag: Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. 
+ :param pulumi.Input[bool] force_delete: If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + :param pulumi.Input[bool] force_update: If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Resource labels to represent user provided metadata. + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field `effective_labels` for all of the labels present on the resource. + :param pulumi.Input[str] location: The GCP location for the backup vault. + :param pulumi.Input[str] name: Output only. Identifier. The resource name. + :param pulumi.Input[str] project: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource + and default labels configured on the provider. + :param pulumi.Input[str] service_account: Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + :param pulumi.Input[str] state: Output only. The BackupVault resource instance state. + Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + ERROR + :param pulumi.Input[str] total_stored_bytes: Output only. Total size of the storage used by all backup resources. + :param pulumi.Input[str] uid: Output only. Output only Immutable after resource creation until resource deletion. + :param pulumi.Input[str] update_time: Output only. The time when the instance was updated. 
+ """ + if allow_missing is not None: + pulumi.set(__self__, "allow_missing", allow_missing) + if annotations is not None: + pulumi.set(__self__, "annotations", annotations) + if backup_count is not None: + pulumi.set(__self__, "backup_count", backup_count) + if backup_minimum_enforced_retention_duration is not None: + pulumi.set(__self__, "backup_minimum_enforced_retention_duration", backup_minimum_enforced_retention_duration) + if backup_vault_id is not None: + pulumi.set(__self__, "backup_vault_id", backup_vault_id) + if create_time is not None: + pulumi.set(__self__, "create_time", create_time) + if deletable is not None: + pulumi.set(__self__, "deletable", deletable) + if description is not None: + pulumi.set(__self__, "description", description) + if effective_annotations is not None: + pulumi.set(__self__, "effective_annotations", effective_annotations) + if effective_labels is not None: + pulumi.set(__self__, "effective_labels", effective_labels) + if effective_time is not None: + pulumi.set(__self__, "effective_time", effective_time) + if etag is not None: + pulumi.set(__self__, "etag", etag) + if force_delete is not None: + pulumi.set(__self__, "force_delete", force_delete) + if force_update is not None: + pulumi.set(__self__, "force_update", force_update) + if labels is not None: + pulumi.set(__self__, "labels", labels) + if location is not None: + pulumi.set(__self__, "location", location) + if name is not None: + pulumi.set(__self__, "name", name) + if project is not None: + pulumi.set(__self__, "project", project) + if pulumi_labels is not None: + pulumi.set(__self__, "pulumi_labels", pulumi_labels) + if service_account is not None: + pulumi.set(__self__, "service_account", service_account) + if state is not None: + pulumi.set(__self__, "state", state) + if total_stored_bytes is not None: + pulumi.set(__self__, "total_stored_bytes", total_stored_bytes) + if uid is not None: + pulumi.set(__self__, "uid", uid) + if update_time is not None: + pulumi.set(__self__, "update_time", update_time) + + @property + @pulumi.getter(name="allowMissing") + def allow_missing(self) -> Optional[pulumi.Input[bool]]: + """ + Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + """ + return pulumi.get(self, "allow_missing") + + @allow_missing.setter + def allow_missing(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "allow_missing", value) + + @property + @pulumi.getter + def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + Optional. User annotations. See https://google.aip.dev/128#annotations + Stores small amounts of arbitrary data. + **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field `effective_annotations` for all of the annotations present on the resource. + """ + return pulumi.get(self, "annotations") + + @annotations.setter + def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "annotations", value) + + @property + @pulumi.getter(name="backupCount") + def backup_count(self) -> Optional[pulumi.Input[str]]: + """ + Output only. The number of backups in this backup vault. 
+ """ + return pulumi.get(self, "backup_count") + + @backup_count.setter + def backup_count(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "backup_count", value) + + @property + @pulumi.getter(name="backupMinimumEnforcedRetentionDuration") + def backup_minimum_enforced_retention_duration(self) -> Optional[pulumi.Input[str]]: + """ + Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + """ + return pulumi.get(self, "backup_minimum_enforced_retention_duration") + + @backup_minimum_enforced_retention_duration.setter + def backup_minimum_enforced_retention_duration(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "backup_minimum_enforced_retention_duration", value) + + @property + @pulumi.getter(name="backupVaultId") + def backup_vault_id(self) -> Optional[pulumi.Input[str]]: + """ + Required. ID of the requesting object. + + + - - - + """ + return pulumi.get(self, "backup_vault_id") + + @backup_vault_id.setter + def backup_vault_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "backup_vault_id", value) + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> Optional[pulumi.Input[str]]: + """ + Output only. The time when the instance was created. + """ + return pulumi.get(self, "create_time") + + @create_time.setter + def create_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "create_time", value) + + @property + @pulumi.getter + def deletable(self) -> Optional[pulumi.Input[bool]]: + """ + Output only. Set to true when there are no backups nested under this resource. + """ + return pulumi.get(self, "deletable") + + @deletable.setter + def deletable(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "deletable", value) + + @property + @pulumi.getter + def description(self) -> Optional[pulumi.Input[str]]: + """ + Optional. The description of the BackupVault instance (2048 characters or less). + """ + return pulumi.get(self, "description") + + @description.setter + def description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "description", value) + + @property + @pulumi.getter(name="effectiveAnnotations") + def effective_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + return pulumi.get(self, "effective_annotations") + + @effective_annotations.setter + def effective_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "effective_annotations", value) + + @property + @pulumi.getter(name="effectiveLabels") + def effective_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + """ + return pulumi.get(self, "effective_labels") + + @effective_labels.setter + def effective_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "effective_labels", value) + + @property + @pulumi.getter(name="effectiveTime") + def effective_time(self) -> Optional[pulumi.Input[str]]: + """ + Optional. Time after which the BackupVault resource is locked. + """ + return pulumi.get(self, "effective_time") + + @effective_time.setter + def effective_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "effective_time", value) + + @property + @pulumi.getter + def etag(self) -> Optional[pulumi.Input[str]]: + """ + Optional. 
Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + """ + return pulumi.get(self, "etag") + + @etag.setter + def etag(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "etag", value) + + @property + @pulumi.getter(name="forceDelete") + def force_delete(self) -> Optional[pulumi.Input[bool]]: + """ + If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + """ + return pulumi.get(self, "force_delete") + + @force_delete.setter + def force_delete(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "force_delete", value) + + @property + @pulumi.getter(name="forceUpdate") + def force_update(self) -> Optional[pulumi.Input[bool]]: + """ + If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + """ + return pulumi.get(self, "force_update") + + @force_update.setter + def force_update(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "force_update", value) + + @property + @pulumi.getter + def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + Optional. Resource labels to represent user provided metadata. + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field `effective_labels` for all of the labels present on the resource. + """ + return pulumi.get(self, "labels") + + @labels.setter + def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "labels", value) + + @property + @pulumi.getter + def location(self) -> Optional[pulumi.Input[str]]: + """ + The GCP location for the backup vault. + """ + return pulumi.get(self, "location") + + @location.setter + def location(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "location", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + Output only. Identifier. The resource name. + """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter + def project(self) -> Optional[pulumi.Input[str]]: + """ + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + """ + return pulumi.get(self, "project") + + @project.setter + def project(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "project", value) + + @property + @pulumi.getter(name="pulumiLabels") + def pulumi_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + The combination of labels configured directly on the resource + and default labels configured on the provider. + """ + return pulumi.get(self, "pulumi_labels") + + @pulumi_labels.setter + def pulumi_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "pulumi_labels", value) + + @property + @pulumi.getter(name="serviceAccount") + def service_account(self) -> Optional[pulumi.Input[str]]: + """ + Output only. 
Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + """ + return pulumi.get(self, "service_account") + + @service_account.setter + def service_account(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "service_account", value) + + @property + @pulumi.getter + def state(self) -> Optional[pulumi.Input[str]]: + """ + Output only. The BackupVault resource instance state. + Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + ERROR + """ + return pulumi.get(self, "state") + + @state.setter + def state(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "state", value) + + @property + @pulumi.getter(name="totalStoredBytes") + def total_stored_bytes(self) -> Optional[pulumi.Input[str]]: + """ + Output only. Total size of the storage used by all backup resources. + """ + return pulumi.get(self, "total_stored_bytes") + + @total_stored_bytes.setter + def total_stored_bytes(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "total_stored_bytes", value) + + @property + @pulumi.getter + def uid(self) -> Optional[pulumi.Input[str]]: + """ + Output only. Output only Immutable after resource creation until resource deletion. + """ + return pulumi.get(self, "uid") + + @uid.setter + def uid(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "uid", value) + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> Optional[pulumi.Input[str]]: + """ + Output only. The time when the instance was updated. + """ + return pulumi.get(self, "update_time") + + @update_time.setter + def update_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "update_time", value) + + +class BackupVault(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + allow_missing: Optional[pulumi.Input[bool]] = None, + annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + backup_minimum_enforced_retention_duration: Optional[pulumi.Input[str]] = None, + backup_vault_id: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + effective_time: Optional[pulumi.Input[str]] = None, + force_delete: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[str]] = None, + __props__=None): + """ + ## Example Usage + + ### Backup Dr Backup Vault Full + + ```python + import pulumi + import pulumi_gcp as gcp + + backup_vault_test = gcp.backupdisasterrecovery.BackupVault("backup-vault-test", + location="us-central1", + backup_vault_id="backup-vault-test", + description="This is a second backup vault built by Terraform.", + backup_minimum_enforced_retention_duration="100000s", + labels={ + "foo": "bar1", + "bar": "baz1", + }, + annotations={ + "annotations1": "bar1", + "annotations2": "baz1", + }, + force_update=True, + force_delete=True, + allow_missing=True) + ``` + + ## Import + + BackupVault can be imported using any of these accepted formats: + + * `projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}` + + * `{{project}}/{{location}}/{{backup_vault_id}}` + + * `{{location}}/{{backup_vault_id}}` + + When using the `pulumi import` command, BackupVault can be imported using 
one of the formats above. For example: + + ```sh + $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}} + ``` + + ```sh + $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{project}}/{{location}}/{{backup_vault_id}} + ``` + + ```sh + $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{location}}/{{backup_vault_id}} + ``` + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[bool] allow_missing: Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Optional. User annotations. See https://google.aip.dev/128#annotations + Stores small amounts of arbitrary data. + **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field `effective_annotations` for all of the annotations present on the resource. + :param pulumi.Input[str] backup_minimum_enforced_retention_duration: Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + :param pulumi.Input[str] backup_vault_id: Required. ID of the requesting object. + + + - - - + :param pulumi.Input[str] description: Optional. The description of the BackupVault instance (2048 characters or less). + :param pulumi.Input[str] effective_time: Optional. Time after which the BackupVault resource is locked. + :param pulumi.Input[bool] force_delete: If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + :param pulumi.Input[bool] force_update: If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Resource labels to represent user provided metadata. + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field `effective_labels` for all of the labels present on the resource. + :param pulumi.Input[str] location: The GCP location for the backup vault. + :param pulumi.Input[str] project: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + """ + ... 
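The generated docstring above notes that the vault's `service_account` output should be granted permissions in the workload project before backups and restores can run there. A minimal sketch of wiring that up, assuming a placeholder workload project ID and an illustrative Backup and DR role name (neither is taken from this patch; check the IAM documentation for the role you actually need):

```python
import pulumi
import pulumi_gcp as gcp

# Create a vault as in the example above; values mirror the generated docs.
vault = gcp.backupdisasterrecovery.BackupVault(
    "vault",
    location="us-central1",
    backup_vault_id="vault-id",
    backup_minimum_enforced_retention_duration="100000s")

# Grant the vault's service account access in the workload project. Both the
# project ID and the role name below are illustrative placeholders.
grant = gcp.projects.IAMMember(
    "vault-sa-grant",
    project="my-workload-project",
    role="roles/backupdr.computeEngineOperator",
    member=vault.service_account.apply(lambda sa: f"serviceAccount:{sa}"))
```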
+ @overload + def __init__(__self__, + resource_name: str, + args: BackupVaultArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + ## Example Usage + + ### Backup Dr Backup Vault Full + + ```python + import pulumi + import pulumi_gcp as gcp + + backup_vault_test = gcp.backupdisasterrecovery.BackupVault("backup-vault-test", + location="us-central1", + backup_vault_id="backup-vault-test", + description="This is a second backup vault built by Terraform.", + backup_minimum_enforced_retention_duration="100000s", + labels={ + "foo": "bar1", + "bar": "baz1", + }, + annotations={ + "annotations1": "bar1", + "annotations2": "baz1", + }, + force_update=True, + force_delete=True, + allow_missing=True) + ``` + + ## Import + + BackupVault can be imported using any of these accepted formats: + + * `projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}}` + + * `{{project}}/{{location}}/{{backup_vault_id}}` + + * `{{location}}/{{backup_vault_id}}` + + When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: + + ```sh + $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{backup_vault_id}} + ``` + + ```sh + $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{project}}/{{location}}/{{backup_vault_id}} + ``` + + ```sh + $ pulumi import gcp:backupdisasterrecovery/backupVault:BackupVault default {{location}}/{{backup_vault_id}} + ``` + + :param str resource_name: The name of the resource. + :param BackupVaultArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... + def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(BackupVaultArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + allow_missing: Optional[pulumi.Input[bool]] = None, + annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + backup_minimum_enforced_retention_duration: Optional[pulumi.Input[str]] = None, + backup_vault_id: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + effective_time: Optional[pulumi.Input[str]] = None, + force_delete: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[str]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = BackupVaultArgs.__new__(BackupVaultArgs) + + __props__.__dict__["allow_missing"] = allow_missing + __props__.__dict__["annotations"] = annotations + if backup_minimum_enforced_retention_duration is None and not opts.urn: + raise TypeError("Missing required property 
'backup_minimum_enforced_retention_duration'") + __props__.__dict__["backup_minimum_enforced_retention_duration"] = backup_minimum_enforced_retention_duration + if backup_vault_id is None and not opts.urn: + raise TypeError("Missing required property 'backup_vault_id'") + __props__.__dict__["backup_vault_id"] = backup_vault_id + __props__.__dict__["description"] = description + __props__.__dict__["effective_time"] = effective_time + __props__.__dict__["force_delete"] = force_delete + __props__.__dict__["force_update"] = force_update + __props__.__dict__["labels"] = labels + if location is None and not opts.urn: + raise TypeError("Missing required property 'location'") + __props__.__dict__["location"] = location + __props__.__dict__["project"] = project + __props__.__dict__["backup_count"] = None + __props__.__dict__["create_time"] = None + __props__.__dict__["deletable"] = None + __props__.__dict__["effective_annotations"] = None + __props__.__dict__["effective_labels"] = None + __props__.__dict__["etag"] = None + __props__.__dict__["name"] = None + __props__.__dict__["pulumi_labels"] = None + __props__.__dict__["service_account"] = None + __props__.__dict__["state"] = None + __props__.__dict__["total_stored_bytes"] = None + __props__.__dict__["uid"] = None + __props__.__dict__["update_time"] = None + secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["effectiveLabels", "pulumiLabels"]) + opts = pulumi.ResourceOptions.merge(opts, secret_opts) + super(BackupVault, __self__).__init__( + 'gcp:backupdisasterrecovery/backupVault:BackupVault', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None, + allow_missing: Optional[pulumi.Input[bool]] = None, + annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + backup_count: Optional[pulumi.Input[str]] = None, + backup_minimum_enforced_retention_duration: Optional[pulumi.Input[str]] = None, + backup_vault_id: Optional[pulumi.Input[str]] = None, + create_time: Optional[pulumi.Input[str]] = None, + deletable: Optional[pulumi.Input[bool]] = None, + description: Optional[pulumi.Input[str]] = None, + effective_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + effective_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + effective_time: Optional[pulumi.Input[str]] = None, + etag: Optional[pulumi.Input[str]] = None, + force_delete: Optional[pulumi.Input[bool]] = None, + force_update: Optional[pulumi.Input[bool]] = None, + labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + location: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[str]] = None, + pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + service_account: Optional[pulumi.Input[str]] = None, + state: Optional[pulumi.Input[str]] = None, + total_stored_bytes: Optional[pulumi.Input[str]] = None, + uid: Optional[pulumi.Input[str]] = None, + update_time: Optional[pulumi.Input[str]] = None) -> 'BackupVault': + """ + Get an existing BackupVault resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. 
+ :param pulumi.Input[bool] allow_missing: Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Optional. User annotations. See https://google.aip.dev/128#annotations + Stores small amounts of arbitrary data. + **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field `effective_annotations` for all of the annotations present on the resource. + :param pulumi.Input[str] backup_count: Output only. The number of backups in this backup vault. + :param pulumi.Input[str] backup_minimum_enforced_retention_duration: Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + :param pulumi.Input[str] backup_vault_id: Required. ID of the requesting object. + + + - - - + :param pulumi.Input[str] create_time: Output only. The time when the instance was created. + :param pulumi.Input[bool] deletable: Output only. Set to true when there are no backups nested under this resource. + :param pulumi.Input[str] description: Optional. The description of the BackupVault instance (2048 characters or less). + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + :param pulumi.Input[str] effective_time: Optional. Time after which the BackupVault resource is locked. + :param pulumi.Input[str] etag: Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + :param pulumi.Input[bool] force_delete: If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + :param pulumi.Input[bool] force_update: If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. Resource labels to represent user provided metadata. + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field `effective_labels` for all of the labels present on the resource. + :param pulumi.Input[str] location: The GCP location for the backup vault. + :param pulumi.Input[str] name: Output only. Identifier. The resource name. + :param pulumi.Input[str] project: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource + and default labels configured on the provider. + :param pulumi.Input[str] service_account: Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. 
+ :param pulumi.Input[str] state: Output only. The BackupVault resource instance state. + Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + ERROR + :param pulumi.Input[str] total_stored_bytes: Output only. Total size of the storage used by all backup resources. + :param pulumi.Input[str] uid: Output only. Output only Immutable after resource creation until resource deletion. + :param pulumi.Input[str] update_time: Output only. The time when the instance was updated. + """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = _BackupVaultState.__new__(_BackupVaultState) + + __props__.__dict__["allow_missing"] = allow_missing + __props__.__dict__["annotations"] = annotations + __props__.__dict__["backup_count"] = backup_count + __props__.__dict__["backup_minimum_enforced_retention_duration"] = backup_minimum_enforced_retention_duration + __props__.__dict__["backup_vault_id"] = backup_vault_id + __props__.__dict__["create_time"] = create_time + __props__.__dict__["deletable"] = deletable + __props__.__dict__["description"] = description + __props__.__dict__["effective_annotations"] = effective_annotations + __props__.__dict__["effective_labels"] = effective_labels + __props__.__dict__["effective_time"] = effective_time + __props__.__dict__["etag"] = etag + __props__.__dict__["force_delete"] = force_delete + __props__.__dict__["force_update"] = force_update + __props__.__dict__["labels"] = labels + __props__.__dict__["location"] = location + __props__.__dict__["name"] = name + __props__.__dict__["project"] = project + __props__.__dict__["pulumi_labels"] = pulumi_labels + __props__.__dict__["service_account"] = service_account + __props__.__dict__["state"] = state + __props__.__dict__["total_stored_bytes"] = total_stored_bytes + __props__.__dict__["uid"] = uid + __props__.__dict__["update_time"] = update_time + return BackupVault(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter(name="allowMissing") + def allow_missing(self) -> pulumi.Output[Optional[bool]]: + """ + Allow idempotent deletion of backup vault. The request will still succeed in case the backup vault does not exist. + """ + return pulumi.get(self, "allow_missing") + + @property + @pulumi.getter + def annotations(self) -> pulumi.Output[Optional[Mapping[str, str]]]: + """ + Optional. User annotations. See https://google.aip.dev/128#annotations + Stores small amounts of arbitrary data. + **Note**: This field is non-authoritative, and will only manage the annotations present in your configuration. + Please refer to the field `effective_annotations` for all of the annotations present on the resource. + """ + return pulumi.get(self, "annotations") + + @property + @pulumi.getter(name="backupCount") + def backup_count(self) -> pulumi.Output[str]: + """ + Output only. The number of backups in this backup vault. + """ + return pulumi.get(self, "backup_count") + + @property + @pulumi.getter(name="backupMinimumEnforcedRetentionDuration") + def backup_minimum_enforced_retention_duration(self) -> pulumi.Output[str]: + """ + Required. The default and minimum enforced retention for each backup within the backup vault. The enforced retention for each backup can be extended. + """ + return pulumi.get(self, "backup_minimum_enforced_retention_duration") + + @property + @pulumi.getter(name="backupVaultId") + def backup_vault_id(self) -> pulumi.Output[str]: + """ + Required. ID of the requesting object. 
+ + + - - - + """ + return pulumi.get(self, "backup_vault_id") + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> pulumi.Output[str]: + """ + Output only. The time when the instance was created. + """ + return pulumi.get(self, "create_time") + + @property + @pulumi.getter + def deletable(self) -> pulumi.Output[bool]: + """ + Output only. Set to true when there are no backups nested under this resource. + """ + return pulumi.get(self, "deletable") + + @property + @pulumi.getter + def description(self) -> pulumi.Output[Optional[str]]: + """ + Optional. The description of the BackupVault instance (2048 characters or less). + """ + return pulumi.get(self, "description") + + @property + @pulumi.getter(name="effectiveAnnotations") + def effective_annotations(self) -> pulumi.Output[Mapping[str, str]]: + return pulumi.get(self, "effective_annotations") + + @property + @pulumi.getter(name="effectiveLabels") + def effective_labels(self) -> pulumi.Output[Mapping[str, str]]: + """ + All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. + """ + return pulumi.get(self, "effective_labels") + + @property + @pulumi.getter(name="effectiveTime") + def effective_time(self) -> pulumi.Output[Optional[str]]: + """ + Optional. Time after which the BackupVault resource is locked. + """ + return pulumi.get(self, "effective_time") + + @property + @pulumi.getter + def etag(self) -> pulumi.Output[str]: + """ + Optional. Server specified ETag for the backup vault resource to prevent simultaneous updates from overwiting each other. + """ + return pulumi.get(self, "etag") + + @property + @pulumi.getter(name="forceDelete") + def force_delete(self) -> pulumi.Output[Optional[bool]]: + """ + If set, the following restrictions against deletion of the backup vault instance can be overridden: + * deletion of a backup vault instance containing no backups, but still containing empty datasources. + * deletion of a backup vault instance that is being referenced by an active backup plan. + """ + return pulumi.get(self, "force_delete") + + @property + @pulumi.getter(name="forceUpdate") + def force_update(self) -> pulumi.Output[Optional[bool]]: + """ + If set, allow update to extend the minimum enforced retention for backup vault. This overrides + the restriction against conflicting retention periods. This conflict may occur when the + expiration schedule defined by the associated backup plan is shorter than the minimum + retention set by the backup vault. + """ + return pulumi.get(self, "force_update") + + @property + @pulumi.getter + def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]: + """ + Optional. Resource labels to represent user provided metadata. + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field `effective_labels` for all of the labels present on the resource. + """ + return pulumi.get(self, "labels") + + @property + @pulumi.getter + def location(self) -> pulumi.Output[str]: + """ + The GCP location for the backup vault. + """ + return pulumi.get(self, "location") + + @property + @pulumi.getter + def name(self) -> pulumi.Output[str]: + """ + Output only. Identifier. The resource name. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter + def project(self) -> pulumi.Output[str]: + """ + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. 
+ """ + return pulumi.get(self, "project") + + @property + @pulumi.getter(name="pulumiLabels") + def pulumi_labels(self) -> pulumi.Output[Mapping[str, str]]: + """ + The combination of labels configured directly on the resource + and default labels configured on the provider. + """ + return pulumi.get(self, "pulumi_labels") + + @property + @pulumi.getter(name="serviceAccount") + def service_account(self) -> pulumi.Output[str]: + """ + Output only. Service account used by the BackupVault Service for this BackupVault. The user should grant this account permissions in their workload project to enable the service to run backups and restores there. + """ + return pulumi.get(self, "service_account") + + @property + @pulumi.getter + def state(self) -> pulumi.Output[str]: + """ + Output only. The BackupVault resource instance state. + Possible values: + STATE_UNSPECIFIED + CREATING + ACTIVE + DELETING + ERROR + """ + return pulumi.get(self, "state") + + @property + @pulumi.getter(name="totalStoredBytes") + def total_stored_bytes(self) -> pulumi.Output[str]: + """ + Output only. Total size of the storage used by all backup resources. + """ + return pulumi.get(self, "total_stored_bytes") + + @property + @pulumi.getter + def uid(self) -> pulumi.Output[str]: + """ + Output only. Output only Immutable after resource creation until resource deletion. + """ + return pulumi.get(self, "uid") + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> pulumi.Output[str]: + """ + Output only. The time when the instance was updated. + """ + return pulumi.get(self, "update_time") + diff --git a/sdk/python/pulumi_gcp/bigquery/_inputs.py b/sdk/python/pulumi_gcp/bigquery/_inputs.py index df70189b35..19e07e0f17 100644 --- a/sdk/python/pulumi_gcp/bigquery/_inputs.py +++ b/sdk/python/pulumi_gcp/bigquery/_inputs.py @@ -49,6 +49,8 @@ 'ConnectionSparkSparkHistoryServerConfigArgsDict', 'DataTransferConfigEmailPreferencesArgs', 'DataTransferConfigEmailPreferencesArgsDict', + 'DataTransferConfigEncryptionConfigurationArgs', + 'DataTransferConfigEncryptionConfigurationArgsDict', 'DataTransferConfigScheduleOptionsArgs', 'DataTransferConfigScheduleOptionsArgsDict', 'DataTransferConfigSensitiveParamsArgs', @@ -1244,6 +1246,37 @@ def enable_failure_email(self, value: pulumi.Input[bool]): pulumi.set(self, "enable_failure_email", value) +if not MYPY: + class DataTransferConfigEncryptionConfigurationArgsDict(TypedDict): + kms_key_name: pulumi.Input[str] + """ + The name of the KMS key used for encrypting BigQuery data. + """ +elif False: + DataTransferConfigEncryptionConfigurationArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class DataTransferConfigEncryptionConfigurationArgs: + def __init__(__self__, *, + kms_key_name: pulumi.Input[str]): + """ + :param pulumi.Input[str] kms_key_name: The name of the KMS key used for encrypting BigQuery data. + """ + pulumi.set(__self__, "kms_key_name", kms_key_name) + + @property + @pulumi.getter(name="kmsKeyName") + def kms_key_name(self) -> pulumi.Input[str]: + """ + The name of the KMS key used for encrypting BigQuery data. 
+ """ + return pulumi.get(self, "kms_key_name") + + @kms_key_name.setter + def kms_key_name(self, value: pulumi.Input[str]): + pulumi.set(self, "kms_key_name", value) + + if not MYPY: class DataTransferConfigScheduleOptionsArgsDict(TypedDict): disable_auto_scheduling: NotRequired[pulumi.Input[bool]] diff --git a/sdk/python/pulumi_gcp/bigquery/data_transfer_config.py b/sdk/python/pulumi_gcp/bigquery/data_transfer_config.py index 55dd10cc60..b5fcb2881f 100644 --- a/sdk/python/pulumi_gcp/bigquery/data_transfer_config.py +++ b/sdk/python/pulumi_gcp/bigquery/data_transfer_config.py @@ -28,6 +28,7 @@ def __init__(__self__, *, destination_dataset_id: Optional[pulumi.Input[str]] = None, disabled: Optional[pulumi.Input[bool]] = None, email_preferences: Optional[pulumi.Input['DataTransferConfigEmailPreferencesArgs']] = None, + encryption_configuration: Optional[pulumi.Input['DataTransferConfigEncryptionConfigurationArgs']] = None, location: Optional[pulumi.Input[str]] = None, notification_pubsub_topic: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, @@ -56,6 +57,8 @@ def __init__(__self__, *, :param pulumi.Input['DataTransferConfigEmailPreferencesArgs'] email_preferences: Email notifications will be sent according to these preferences to the email address of the user who owns this transfer config. Structure is documented below. + :param pulumi.Input['DataTransferConfigEncryptionConfigurationArgs'] encryption_configuration: Represents the encryption configuration for a transfer. + Structure is documented below. :param pulumi.Input[str] location: The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value is US. :param pulumi.Input[str] notification_pubsub_topic: Pub/Sub topic where notifications will be sent after transfer runs @@ -95,6 +98,8 @@ def __init__(__self__, *, pulumi.set(__self__, "disabled", disabled) if email_preferences is not None: pulumi.set(__self__, "email_preferences", email_preferences) + if encryption_configuration is not None: + pulumi.set(__self__, "encryption_configuration", encryption_configuration) if location is not None: pulumi.set(__self__, "location", location) if notification_pubsub_topic is not None: @@ -206,6 +211,19 @@ def email_preferences(self) -> Optional[pulumi.Input['DataTransferConfigEmailPre def email_preferences(self, value: Optional[pulumi.Input['DataTransferConfigEmailPreferencesArgs']]): pulumi.set(self, "email_preferences", value) + @property + @pulumi.getter(name="encryptionConfiguration") + def encryption_configuration(self) -> Optional[pulumi.Input['DataTransferConfigEncryptionConfigurationArgs']]: + """ + Represents the encryption configuration for a transfer. + Structure is documented below. 
+ """ + return pulumi.get(self, "encryption_configuration") + + @encryption_configuration.setter + def encryption_configuration(self, value: Optional[pulumi.Input['DataTransferConfigEncryptionConfigurationArgs']]): + pulumi.set(self, "encryption_configuration", value) + @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: @@ -320,6 +338,7 @@ def __init__(__self__, *, disabled: Optional[pulumi.Input[bool]] = None, display_name: Optional[pulumi.Input[str]] = None, email_preferences: Optional[pulumi.Input['DataTransferConfigEmailPreferencesArgs']] = None, + encryption_configuration: Optional[pulumi.Input['DataTransferConfigEncryptionConfigurationArgs']] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, notification_pubsub_topic: Optional[pulumi.Input[str]] = None, @@ -343,6 +362,8 @@ def __init__(__self__, *, :param pulumi.Input['DataTransferConfigEmailPreferencesArgs'] email_preferences: Email notifications will be sent according to these preferences to the email address of the user who owns this transfer config. Structure is documented below. + :param pulumi.Input['DataTransferConfigEncryptionConfigurationArgs'] encryption_configuration: Represents the encryption configuration for a transfer. + Structure is documented below. :param pulumi.Input[str] location: The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value is US. :param pulumi.Input[str] name: The resource name of the transfer config. Transfer config names have the @@ -395,6 +416,8 @@ def __init__(__self__, *, pulumi.set(__self__, "display_name", display_name) if email_preferences is not None: pulumi.set(__self__, "email_preferences", email_preferences) + if encryption_configuration is not None: + pulumi.set(__self__, "encryption_configuration", encryption_configuration) if location is not None: pulumi.set(__self__, "location", location) if name is not None: @@ -492,6 +515,19 @@ def email_preferences(self) -> Optional[pulumi.Input['DataTransferConfigEmailPre def email_preferences(self, value: Optional[pulumi.Input['DataTransferConfigEmailPreferencesArgs']]): pulumi.set(self, "email_preferences", value) + @property + @pulumi.getter(name="encryptionConfiguration") + def encryption_configuration(self) -> Optional[pulumi.Input['DataTransferConfigEncryptionConfigurationArgs']]: + """ + Represents the encryption configuration for a transfer. + Structure is documented below. 
+ """ + return pulumi.get(self, "encryption_configuration") + + @encryption_configuration.setter + def encryption_configuration(self, value: Optional[pulumi.Input['DataTransferConfigEncryptionConfigurationArgs']]): + pulumi.set(self, "encryption_configuration", value) + @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: @@ -642,6 +678,7 @@ def __init__(__self__, disabled: Optional[pulumi.Input[bool]] = None, display_name: Optional[pulumi.Input[str]] = None, email_preferences: Optional[pulumi.Input[Union['DataTransferConfigEmailPreferencesArgs', 'DataTransferConfigEmailPreferencesArgsDict']]] = None, + encryption_configuration: Optional[pulumi.Input[Union['DataTransferConfigEncryptionConfigurationArgs', 'DataTransferConfigEncryptionConfigurationArgsDict']]] = None, location: Optional[pulumi.Input[str]] = None, notification_pubsub_topic: Optional[pulumi.Input[str]] = None, params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, @@ -693,6 +730,45 @@ def __init__(__self__, }, opts = pulumi.ResourceOptions(depends_on=[permissions])) ``` + ### Bigquerydatatransfer Config Cmek + + ```python + import pulumi + import pulumi_gcp as gcp + + project = gcp.organizations.get_project() + permissions = gcp.projects.IAMMember("permissions", + project=project.project_id, + role="roles/iam.serviceAccountTokenCreator", + member=f"serviceAccount:service-{project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com") + my_dataset = gcp.bigquery.Dataset("my_dataset", + dataset_id="example_dataset", + friendly_name="foo", + description="bar", + location="asia-northeast1", + opts = pulumi.ResourceOptions(depends_on=[permissions])) + key_ring = gcp.kms.KeyRing("key_ring", + name="example-keyring", + location="us") + crypto_key = gcp.kms.CryptoKey("crypto_key", + name="example-key", + key_ring=key_ring.id) + query_config_cmek = gcp.bigquery.DataTransferConfig("query_config_cmek", + display_name="", + location="asia-northeast1", + data_source_id="scheduled_query", + schedule="first sunday of quarter 00:00", + destination_dataset_id=my_dataset.dataset_id, + params={ + "destination_table_name_template": "my_table", + "write_disposition": "WRITE_APPEND", + "query": "SELECT name FROM tabl WHERE x = 'y'", + }, + encryption_configuration={ + "kms_key_name": crypto_key.id, + }, + opts = pulumi.ResourceOptions(depends_on=[permissions])) + ``` ### Bigquerydatatransfer Config Salesforce ```python @@ -713,9 +789,7 @@ def __init__(__self__, params={ "connector.authentication.oauth.clientId": "client-id", "connector.authentication.oauth.clientSecret": "client-secret", - "connector.authentication.username": "username", - "connector.authentication.password": "password", - "connector.authentication.securityToken": "security-token", + "connector.authentication.oauth.myDomain": "MyDomainName", "assets": "[\\"asset-a\\",\\"asset-b\\"]", }) ``` @@ -746,6 +820,8 @@ def __init__(__self__, :param pulumi.Input[Union['DataTransferConfigEmailPreferencesArgs', 'DataTransferConfigEmailPreferencesArgsDict']] email_preferences: Email notifications will be sent according to these preferences to the email address of the user who owns this transfer config. Structure is documented below. + :param pulumi.Input[Union['DataTransferConfigEncryptionConfigurationArgs', 'DataTransferConfigEncryptionConfigurationArgsDict']] encryption_configuration: Represents the encryption configuration for a transfer. + Structure is documented below. 
:param pulumi.Input[str] location: The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value is US. :param pulumi.Input[str] notification_pubsub_topic: Pub/Sub topic where notifications will be sent after transfer runs @@ -829,6 +905,45 @@ def __init__(__self__, }, opts = pulumi.ResourceOptions(depends_on=[permissions])) ``` + ### Bigquerydatatransfer Config Cmek + + ```python + import pulumi + import pulumi_gcp as gcp + + project = gcp.organizations.get_project() + permissions = gcp.projects.IAMMember("permissions", + project=project.project_id, + role="roles/iam.serviceAccountTokenCreator", + member=f"serviceAccount:service-{project.number}@gcp-sa-bigquerydatatransfer.iam.gserviceaccount.com") + my_dataset = gcp.bigquery.Dataset("my_dataset", + dataset_id="example_dataset", + friendly_name="foo", + description="bar", + location="asia-northeast1", + opts = pulumi.ResourceOptions(depends_on=[permissions])) + key_ring = gcp.kms.KeyRing("key_ring", + name="example-keyring", + location="us") + crypto_key = gcp.kms.CryptoKey("crypto_key", + name="example-key", + key_ring=key_ring.id) + query_config_cmek = gcp.bigquery.DataTransferConfig("query_config_cmek", + display_name="", + location="asia-northeast1", + data_source_id="scheduled_query", + schedule="first sunday of quarter 00:00", + destination_dataset_id=my_dataset.dataset_id, + params={ + "destination_table_name_template": "my_table", + "write_disposition": "WRITE_APPEND", + "query": "SELECT name FROM tabl WHERE x = 'y'", + }, + encryption_configuration={ + "kms_key_name": crypto_key.id, + }, + opts = pulumi.ResourceOptions(depends_on=[permissions])) + ``` ### Bigquerydatatransfer Config Salesforce ```python @@ -849,9 +964,7 @@ def __init__(__self__, params={ "connector.authentication.oauth.clientId": "client-id", "connector.authentication.oauth.clientSecret": "client-secret", - "connector.authentication.username": "username", - "connector.authentication.password": "password", - "connector.authentication.securityToken": "security-token", + "connector.authentication.oauth.myDomain": "MyDomainName", "assets": "[\\"asset-a\\",\\"asset-b\\"]", }) ``` @@ -889,6 +1002,7 @@ def _internal_init(__self__, disabled: Optional[pulumi.Input[bool]] = None, display_name: Optional[pulumi.Input[str]] = None, email_preferences: Optional[pulumi.Input[Union['DataTransferConfigEmailPreferencesArgs', 'DataTransferConfigEmailPreferencesArgsDict']]] = None, + encryption_configuration: Optional[pulumi.Input[Union['DataTransferConfigEncryptionConfigurationArgs', 'DataTransferConfigEncryptionConfigurationArgsDict']]] = None, location: Optional[pulumi.Input[str]] = None, notification_pubsub_topic: Optional[pulumi.Input[str]] = None, params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, @@ -916,6 +1030,7 @@ def _internal_init(__self__, raise TypeError("Missing required property 'display_name'") __props__.__dict__["display_name"] = display_name __props__.__dict__["email_preferences"] = email_preferences + __props__.__dict__["encryption_configuration"] = encryption_configuration __props__.__dict__["location"] = location __props__.__dict__["notification_pubsub_topic"] = notification_pubsub_topic if params is None and not opts.urn: @@ -943,6 +1058,7 @@ def get(resource_name: str, disabled: Optional[pulumi.Input[bool]] = None, display_name: Optional[pulumi.Input[str]] = None, email_preferences: Optional[pulumi.Input[Union['DataTransferConfigEmailPreferencesArgs', 
'DataTransferConfigEmailPreferencesArgsDict']]] = None, + encryption_configuration: Optional[pulumi.Input[Union['DataTransferConfigEncryptionConfigurationArgs', 'DataTransferConfigEncryptionConfigurationArgsDict']]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, notification_pubsub_topic: Optional[pulumi.Input[str]] = None, @@ -971,6 +1087,8 @@ def get(resource_name: str, :param pulumi.Input[Union['DataTransferConfigEmailPreferencesArgs', 'DataTransferConfigEmailPreferencesArgsDict']] email_preferences: Email notifications will be sent according to these preferences to the email address of the user who owns this transfer config. Structure is documented below. + :param pulumi.Input[Union['DataTransferConfigEncryptionConfigurationArgs', 'DataTransferConfigEncryptionConfigurationArgsDict']] encryption_configuration: Represents the encryption configuration for a transfer. + Structure is documented below. :param pulumi.Input[str] location: The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value is US. :param pulumi.Input[str] name: The resource name of the transfer config. Transfer config names have the @@ -1021,6 +1139,7 @@ def get(resource_name: str, __props__.__dict__["disabled"] = disabled __props__.__dict__["display_name"] = display_name __props__.__dict__["email_preferences"] = email_preferences + __props__.__dict__["encryption_configuration"] = encryption_configuration __props__.__dict__["location"] = location __props__.__dict__["name"] = name __props__.__dict__["notification_pubsub_topic"] = notification_pubsub_topic @@ -1086,6 +1205,15 @@ def email_preferences(self) -> pulumi.Output[Optional['outputs.DataTransferConfi """ return pulumi.get(self, "email_preferences") + @property + @pulumi.getter(name="encryptionConfiguration") + def encryption_configuration(self) -> pulumi.Output[Optional['outputs.DataTransferConfigEncryptionConfiguration']]: + """ + Represents the encryption configuration for a transfer. + Structure is documented below. + """ + return pulumi.get(self, "encryption_configuration") + @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: diff --git a/sdk/python/pulumi_gcp/bigquery/outputs.py b/sdk/python/pulumi_gcp/bigquery/outputs.py index ed46996b0f..480ca63ae1 100644 --- a/sdk/python/pulumi_gcp/bigquery/outputs.py +++ b/sdk/python/pulumi_gcp/bigquery/outputs.py @@ -33,6 +33,7 @@ 'ConnectionSparkMetastoreServiceConfig', 'ConnectionSparkSparkHistoryServerConfig', 'DataTransferConfigEmailPreferences', + 'DataTransferConfigEncryptionConfiguration', 'DataTransferConfigScheduleOptions', 'DataTransferConfigSensitiveParams', 'DatasetAccess', @@ -985,6 +986,41 @@ def enable_failure_email(self) -> bool: return pulumi.get(self, "enable_failure_email") +@pulumi.output_type +class DataTransferConfigEncryptionConfiguration(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "kmsKeyName": + suggest = "kms_key_name" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in DataTransferConfigEncryptionConfiguration. 
Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + DataTransferConfigEncryptionConfiguration.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + DataTransferConfigEncryptionConfiguration.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + kms_key_name: str): + """ + :param str kms_key_name: The name of the KMS key used for encrypting BigQuery data. + """ + pulumi.set(__self__, "kms_key_name", kms_key_name) + + @property + @pulumi.getter(name="kmsKeyName") + def kms_key_name(self) -> str: + """ + The name of the KMS key used for encrypting BigQuery data. + """ + return pulumi.get(self, "kms_key_name") + + @pulumi.output_type class DataTransferConfigScheduleOptions(dict): @staticmethod diff --git a/sdk/python/pulumi_gcp/bigqueryanalyticshub/_inputs.py b/sdk/python/pulumi_gcp/bigqueryanalyticshub/_inputs.py index b6a53ab0f7..7a01fa4d23 100644 --- a/sdk/python/pulumi_gcp/bigqueryanalyticshub/_inputs.py +++ b/sdk/python/pulumi_gcp/bigqueryanalyticshub/_inputs.py @@ -19,8 +19,16 @@ 'DataExchangeIamBindingConditionArgsDict', 'DataExchangeIamMemberConditionArgs', 'DataExchangeIamMemberConditionArgsDict', + 'DataExchangeSharingEnvironmentConfigArgs', + 'DataExchangeSharingEnvironmentConfigArgsDict', + 'DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs', + 'DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgsDict', + 'DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs', + 'DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgsDict', 'ListingBigqueryDatasetArgs', 'ListingBigqueryDatasetArgsDict', + 'ListingBigqueryDatasetSelectedResourceArgs', + 'ListingBigqueryDatasetSelectedResourceArgsDict', 'ListingDataProviderArgs', 'ListingDataProviderArgsDict', 'ListingIamBindingConditionArgs', @@ -129,13 +137,92 @@ def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) +if not MYPY: + class DataExchangeSharingEnvironmentConfigArgsDict(TypedDict): + dcr_exchange_config: NotRequired[pulumi.Input['DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgsDict']] + """ + Data Clean Room (DCR), used for privacy-safe and secured data sharing. + """ + default_exchange_config: NotRequired[pulumi.Input['DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgsDict']] + """ + Default Analytics Hub data exchange, used for secured data sharing. + """ +elif False: + DataExchangeSharingEnvironmentConfigArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class DataExchangeSharingEnvironmentConfigArgs: + def __init__(__self__, *, + dcr_exchange_config: Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs']] = None, + default_exchange_config: Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs']] = None): + """ + :param pulumi.Input['DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs'] dcr_exchange_config: Data Clean Room (DCR), used for privacy-safe and secured data sharing. + :param pulumi.Input['DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs'] default_exchange_config: Default Analytics Hub data exchange, used for secured data sharing. 
+ """ + if dcr_exchange_config is not None: + pulumi.set(__self__, "dcr_exchange_config", dcr_exchange_config) + if default_exchange_config is not None: + pulumi.set(__self__, "default_exchange_config", default_exchange_config) + + @property + @pulumi.getter(name="dcrExchangeConfig") + def dcr_exchange_config(self) -> Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs']]: + """ + Data Clean Room (DCR), used for privacy-safe and secured data sharing. + """ + return pulumi.get(self, "dcr_exchange_config") + + @dcr_exchange_config.setter + def dcr_exchange_config(self, value: Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs']]): + pulumi.set(self, "dcr_exchange_config", value) + + @property + @pulumi.getter(name="defaultExchangeConfig") + def default_exchange_config(self) -> Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs']]: + """ + Default Analytics Hub data exchange, used for secured data sharing. + """ + return pulumi.get(self, "default_exchange_config") + + @default_exchange_config.setter + def default_exchange_config(self, value: Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs']]): + pulumi.set(self, "default_exchange_config", value) + + +if not MYPY: + class DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgsDict(TypedDict): + pass +elif False: + DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs: + def __init__(__self__): + pass + + +if not MYPY: + class DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgsDict(TypedDict): + pass +elif False: + DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs: + def __init__(__self__): + pass + + if not MYPY: class ListingBigqueryDatasetArgsDict(TypedDict): dataset: pulumi.Input[str] """ Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 - - - - - + """ + selected_resources: NotRequired[pulumi.Input[Sequence[pulumi.Input['ListingBigqueryDatasetSelectedResourceArgsDict']]]] + """ + Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + Structure is documented below. """ elif False: ListingBigqueryDatasetArgsDict: TypeAlias = Mapping[str, Any] @@ -143,21 +230,22 @@ class ListingBigqueryDatasetArgsDict(TypedDict): @pulumi.input_type class ListingBigqueryDatasetArgs: def __init__(__self__, *, - dataset: pulumi.Input[str]): + dataset: pulumi.Input[str], + selected_resources: Optional[pulumi.Input[Sequence[pulumi.Input['ListingBigqueryDatasetSelectedResourceArgs']]]] = None): """ :param pulumi.Input[str] dataset: Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 - - - - - + :param pulumi.Input[Sequence[pulumi.Input['ListingBigqueryDatasetSelectedResourceArgs']]] selected_resources: Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + Structure is documented below. """ pulumi.set(__self__, "dataset", dataset) + if selected_resources is not None: + pulumi.set(__self__, "selected_resources", selected_resources) @property @pulumi.getter def dataset(self) -> pulumi.Input[str]: """ Resource name of the dataset source for this listing. e.g. 
projects/myproject/datasets/123 - - - - - """ return pulumi.get(self, "dataset") @@ -165,6 +253,57 @@ def dataset(self) -> pulumi.Input[str]: def dataset(self, value: pulumi.Input[str]): pulumi.set(self, "dataset", value) + @property + @pulumi.getter(name="selectedResources") + def selected_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListingBigqueryDatasetSelectedResourceArgs']]]]: + """ + Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + Structure is documented below. + """ + return pulumi.get(self, "selected_resources") + + @selected_resources.setter + def selected_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListingBigqueryDatasetSelectedResourceArgs']]]]): + pulumi.set(self, "selected_resources", value) + + +if not MYPY: + class ListingBigqueryDatasetSelectedResourceArgsDict(TypedDict): + table: NotRequired[pulumi.Input[str]] + """ + Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + + - - - + """ +elif False: + ListingBigqueryDatasetSelectedResourceArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class ListingBigqueryDatasetSelectedResourceArgs: + def __init__(__self__, *, + table: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] table: Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + + - - - + """ + if table is not None: + pulumi.set(__self__, "table", table) + + @property + @pulumi.getter + def table(self) -> Optional[pulumi.Input[str]]: + """ + Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + + - - - + """ + return pulumi.get(self, "table") + + @table.setter + def table(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "table", value) + if not MYPY: class ListingDataProviderArgsDict(TypedDict): @@ -368,6 +507,11 @@ class ListingRestrictedExportConfigArgsDict(TypedDict): """ If true, enable restricted export. """ + restrict_direct_table_access: NotRequired[pulumi.Input[bool]] + """ + (Output) + If true, restrict direct table access(read api/tabledata.list) on linked table. + """ restrict_query_result: NotRequired[pulumi.Input[bool]] """ If true, restrict export of query result derived from restricted linked dataset table. @@ -379,13 +523,18 @@ class ListingRestrictedExportConfigArgsDict(TypedDict): class ListingRestrictedExportConfigArgs: def __init__(__self__, *, enabled: Optional[pulumi.Input[bool]] = None, + restrict_direct_table_access: Optional[pulumi.Input[bool]] = None, restrict_query_result: Optional[pulumi.Input[bool]] = None): """ :param pulumi.Input[bool] enabled: If true, enable restricted export. + :param pulumi.Input[bool] restrict_direct_table_access: (Output) + If true, restrict direct table access(read api/tabledata.list) on linked table. :param pulumi.Input[bool] restrict_query_result: If true, restrict export of query result derived from restricted linked dataset table. 
""" if enabled is not None: pulumi.set(__self__, "enabled", enabled) + if restrict_direct_table_access is not None: + pulumi.set(__self__, "restrict_direct_table_access", restrict_direct_table_access) if restrict_query_result is not None: pulumi.set(__self__, "restrict_query_result", restrict_query_result) @@ -401,6 +550,19 @@ def enabled(self) -> Optional[pulumi.Input[bool]]: def enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enabled", value) + @property + @pulumi.getter(name="restrictDirectTableAccess") + def restrict_direct_table_access(self) -> Optional[pulumi.Input[bool]]: + """ + (Output) + If true, restrict direct table access(read api/tabledata.list) on linked table. + """ + return pulumi.get(self, "restrict_direct_table_access") + + @restrict_direct_table_access.setter + def restrict_direct_table_access(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "restrict_direct_table_access", value) + @property @pulumi.getter(name="restrictQueryResult") def restrict_query_result(self) -> Optional[pulumi.Input[bool]]: diff --git a/sdk/python/pulumi_gcp/bigqueryanalyticshub/data_exchange.py b/sdk/python/pulumi_gcp/bigqueryanalyticshub/data_exchange.py index 917fe23a3b..66d624b278 100644 --- a/sdk/python/pulumi_gcp/bigqueryanalyticshub/data_exchange.py +++ b/sdk/python/pulumi_gcp/bigqueryanalyticshub/data_exchange.py @@ -13,6 +13,8 @@ else: from typing_extensions import NotRequired, TypedDict, TypeAlias from .. import _utilities +from . import outputs +from ._inputs import * __all__ = ['DataExchangeArgs', 'DataExchange'] @@ -26,7 +28,8 @@ def __init__(__self__, *, documentation: Optional[pulumi.Input[str]] = None, icon: Optional[pulumi.Input[str]] = None, primary_contact: Optional[pulumi.Input[str]] = None, - project: Optional[pulumi.Input[str]] = None): + project: Optional[pulumi.Input[str]] = None, + sharing_environment_config: Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigArgs']] = None): """ The set of arguments for constructing a DataExchange resource. :param pulumi.Input[str] data_exchange_id: The ID of the data exchange. Must contain only Unicode letters, numbers (0-9), underscores (_). Should not use characters that require URL-escaping, or characters outside of ASCII, spaces. @@ -41,6 +44,9 @@ def __init__(__self__, *, :param pulumi.Input[str] primary_contact: Email or URL of the primary point of contact of the data exchange. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + :param pulumi.Input['DataExchangeSharingEnvironmentConfigArgs'] sharing_environment_config: Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. 
""" pulumi.set(__self__, "data_exchange_id", data_exchange_id) pulumi.set(__self__, "display_name", display_name) @@ -55,6 +61,8 @@ def __init__(__self__, *, pulumi.set(__self__, "primary_contact", primary_contact) if project is not None: pulumi.set(__self__, "project", project) + if sharing_environment_config is not None: + pulumi.set(__self__, "sharing_environment_config", sharing_environment_config) @property @pulumi.getter(name="dataExchangeId") @@ -156,6 +164,20 @@ def project(self) -> Optional[pulumi.Input[str]]: def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) + @property + @pulumi.getter(name="sharingEnvironmentConfig") + def sharing_environment_config(self) -> Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigArgs']]: + """ + Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. + """ + return pulumi.get(self, "sharing_environment_config") + + @sharing_environment_config.setter + def sharing_environment_config(self, value: Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigArgs']]): + pulumi.set(self, "sharing_environment_config", value) + @pulumi.input_type class _DataExchangeState: @@ -169,7 +191,8 @@ def __init__(__self__, *, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, primary_contact: Optional[pulumi.Input[str]] = None, - project: Optional[pulumi.Input[str]] = None): + project: Optional[pulumi.Input[str]] = None, + sharing_environment_config: Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigArgs']] = None): """ Input properties used for looking up and filtering DataExchange resources. :param pulumi.Input[str] data_exchange_id: The ID of the data exchange. Must contain only Unicode letters, numbers (0-9), underscores (_). Should not use characters that require URL-escaping, or characters outside of ASCII, spaces. @@ -187,6 +210,9 @@ def __init__(__self__, *, :param pulumi.Input[str] primary_contact: Email or URL of the primary point of contact of the data exchange. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + :param pulumi.Input['DataExchangeSharingEnvironmentConfigArgs'] sharing_environment_config: Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. """ if data_exchange_id is not None: pulumi.set(__self__, "data_exchange_id", data_exchange_id) @@ -208,6 +234,8 @@ def __init__(__self__, *, pulumi.set(__self__, "primary_contact", primary_contact) if project is not None: pulumi.set(__self__, "project", project) + if sharing_environment_config is not None: + pulumi.set(__self__, "sharing_environment_config", sharing_environment_config) @property @pulumi.getter(name="dataExchangeId") @@ -334,6 +362,20 @@ def project(self) -> Optional[pulumi.Input[str]]: def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) + @property + @pulumi.getter(name="sharingEnvironmentConfig") + def sharing_environment_config(self) -> Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigArgs']]: + """ + Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. 
+ """ + return pulumi.get(self, "sharing_environment_config") + + @sharing_environment_config.setter + def sharing_environment_config(self, value: Optional[pulumi.Input['DataExchangeSharingEnvironmentConfigArgs']]): + pulumi.set(self, "sharing_environment_config", value) + class DataExchange(pulumi.CustomResource): @overload @@ -348,6 +390,7 @@ def __init__(__self__, location: Optional[pulumi.Input[str]] = None, primary_contact: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, + sharing_environment_config: Optional[pulumi.Input[Union['DataExchangeSharingEnvironmentConfigArgs', 'DataExchangeSharingEnvironmentConfigArgsDict']]] = None, __props__=None): """ A Bigquery Analytics Hub data exchange @@ -372,6 +415,21 @@ def __init__(__self__, display_name="my_data_exchange", description="example data exchange") ``` + ### Bigquery Analyticshub Data Exchange Dcr + + ```python + import pulumi + import pulumi_gcp as gcp + + data_exchange = gcp.bigqueryanalyticshub.DataExchange("data_exchange", + location="US", + data_exchange_id="dcr_data_exchange", + display_name="dcr_data_exchange", + description="example dcr data exchange", + sharing_environment_config={ + "dcr_exchange_config": {}, + }) + ``` ## Import @@ -417,6 +475,9 @@ def __init__(__self__, :param pulumi.Input[str] primary_contact: Email or URL of the primary point of contact of the data exchange. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + :param pulumi.Input[Union['DataExchangeSharingEnvironmentConfigArgs', 'DataExchangeSharingEnvironmentConfigArgsDict']] sharing_environment_config: Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. """ ... 
@overload @@ -447,6 +508,21 @@ def __init__(__self__, display_name="my_data_exchange", description="example data exchange") ``` + ### Bigquery Analyticshub Data Exchange Dcr + + ```python + import pulumi + import pulumi_gcp as gcp + + data_exchange = gcp.bigqueryanalyticshub.DataExchange("data_exchange", + location="US", + data_exchange_id="dcr_data_exchange", + display_name="dcr_data_exchange", + description="example dcr data exchange", + sharing_environment_config={ + "dcr_exchange_config": {}, + }) + ``` ## Import @@ -501,6 +577,7 @@ def _internal_init(__self__, location: Optional[pulumi.Input[str]] = None, primary_contact: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, + sharing_environment_config: Optional[pulumi.Input[Union['DataExchangeSharingEnvironmentConfigArgs', 'DataExchangeSharingEnvironmentConfigArgsDict']]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -524,6 +601,7 @@ def _internal_init(__self__, __props__.__dict__["location"] = location __props__.__dict__["primary_contact"] = primary_contact __props__.__dict__["project"] = project + __props__.__dict__["sharing_environment_config"] = sharing_environment_config __props__.__dict__["listing_count"] = None __props__.__dict__["name"] = None super(DataExchange, __self__).__init__( @@ -545,7 +623,8 @@ def get(resource_name: str, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, primary_contact: Optional[pulumi.Input[str]] = None, - project: Optional[pulumi.Input[str]] = None) -> 'DataExchange': + project: Optional[pulumi.Input[str]] = None, + sharing_environment_config: Optional[pulumi.Input[Union['DataExchangeSharingEnvironmentConfigArgs', 'DataExchangeSharingEnvironmentConfigArgsDict']]] = None) -> 'DataExchange': """ Get an existing DataExchange resource's state with the given name, id, and optional extra properties used to qualify the lookup. @@ -568,6 +647,9 @@ def get(resource_name: str, :param pulumi.Input[str] primary_contact: Email or URL of the primary point of contact of the data exchange. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. + :param pulumi.Input[Union['DataExchangeSharingEnvironmentConfigArgs', 'DataExchangeSharingEnvironmentConfigArgsDict']] sharing_environment_config: Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -583,6 +665,7 @@ def get(resource_name: str, __props__.__dict__["name"] = name __props__.__dict__["primary_contact"] = primary_contact __props__.__dict__["project"] = project + __props__.__dict__["sharing_environment_config"] = sharing_environment_config return DataExchange(resource_name, opts=opts, __props__=__props__) @property @@ -670,3 +753,13 @@ def project(self) -> pulumi.Output[str]: """ return pulumi.get(self, "project") + @property + @pulumi.getter(name="sharingEnvironmentConfig") + def sharing_environment_config(self) -> pulumi.Output['outputs.DataExchangeSharingEnvironmentConfig']: + """ + Configurable data sharing environment option for a data exchange. + This field is required for data clean room exchanges. + Structure is documented below. 
+ """ + return pulumi.get(self, "sharing_environment_config") + diff --git a/sdk/python/pulumi_gcp/bigqueryanalyticshub/listing.py b/sdk/python/pulumi_gcp/bigqueryanalyticshub/listing.py index d523e4a3d4..9806c2d0d5 100644 --- a/sdk/python/pulumi_gcp/bigqueryanalyticshub/listing.py +++ b/sdk/python/pulumi_gcp/bigqueryanalyticshub/listing.py @@ -613,6 +613,63 @@ def __init__(__self__, "restrict_query_result": True, }) ``` + ### Bigquery Analyticshub Listing Dcr + + ```python + import pulumi + import pulumi_gcp as gcp + + listing = gcp.bigqueryanalyticshub.DataExchange("listing", + location="US", + data_exchange_id="dcr_data_exchange", + display_name="dcr_data_exchange", + description="example dcr data exchange", + sharing_environment_config={ + "dcr_exchange_config": {}, + }) + listing_dataset = gcp.bigquery.Dataset("listing", + dataset_id="dcr_listing", + friendly_name="dcr_listing", + description="example dcr data exchange", + location="US") + listing_table = gcp.bigquery.Table("listing", + deletion_protection=False, + table_id="dcr_listing", + dataset_id=listing_dataset.dataset_id, + schema=\"\"\"[ + { + "name": "name", + "type": "STRING", + "mode": "NULLABLE" + }, + { + "name": "post_abbr", + "type": "STRING", + "mode": "NULLABLE" + }, + { + "name": "date", + "type": "DATE", + "mode": "NULLABLE" + } + ] + \"\"\") + listing_listing = gcp.bigqueryanalyticshub.Listing("listing", + location="US", + data_exchange_id=listing.data_exchange_id, + listing_id="dcr_listing", + display_name="dcr_listing", + description="example dcr data exchange", + bigquery_dataset={ + "dataset": listing_dataset.id, + "selected_resources": [{ + "table": listing_table.id, + }], + }, + restricted_export_config={ + "enabled": True, + }) + ``` ## Import @@ -730,6 +787,63 @@ def __init__(__self__, "restrict_query_result": True, }) ``` + ### Bigquery Analyticshub Listing Dcr + + ```python + import pulumi + import pulumi_gcp as gcp + + listing = gcp.bigqueryanalyticshub.DataExchange("listing", + location="US", + data_exchange_id="dcr_data_exchange", + display_name="dcr_data_exchange", + description="example dcr data exchange", + sharing_environment_config={ + "dcr_exchange_config": {}, + }) + listing_dataset = gcp.bigquery.Dataset("listing", + dataset_id="dcr_listing", + friendly_name="dcr_listing", + description="example dcr data exchange", + location="US") + listing_table = gcp.bigquery.Table("listing", + deletion_protection=False, + table_id="dcr_listing", + dataset_id=listing_dataset.dataset_id, + schema=\"\"\"[ + { + "name": "name", + "type": "STRING", + "mode": "NULLABLE" + }, + { + "name": "post_abbr", + "type": "STRING", + "mode": "NULLABLE" + }, + { + "name": "date", + "type": "DATE", + "mode": "NULLABLE" + } + ] + \"\"\") + listing_listing = gcp.bigqueryanalyticshub.Listing("listing", + location="US", + data_exchange_id=listing.data_exchange_id, + listing_id="dcr_listing", + display_name="dcr_listing", + description="example dcr data exchange", + bigquery_dataset={ + "dataset": listing_dataset.id, + "selected_resources": [{ + "table": listing_table.id, + }], + }, + restricted_export_config={ + "enabled": True, + }) + ``` ## Import diff --git a/sdk/python/pulumi_gcp/bigqueryanalyticshub/outputs.py b/sdk/python/pulumi_gcp/bigqueryanalyticshub/outputs.py index 6952a54308..87f95b2f21 100644 --- a/sdk/python/pulumi_gcp/bigqueryanalyticshub/outputs.py +++ b/sdk/python/pulumi_gcp/bigqueryanalyticshub/outputs.py @@ -13,11 +13,16 @@ else: from typing_extensions import NotRequired, TypedDict, TypeAlias from .. 
import _utilities +from . import outputs __all__ = [ 'DataExchangeIamBindingCondition', 'DataExchangeIamMemberCondition', + 'DataExchangeSharingEnvironmentConfig', + 'DataExchangeSharingEnvironmentConfigDcrExchangeConfig', + 'DataExchangeSharingEnvironmentConfigDefaultExchangeConfig', 'ListingBigqueryDataset', + 'ListingBigqueryDatasetSelectedResource', 'ListingDataProvider', 'ListingIamBindingCondition', 'ListingIamMemberCondition', @@ -79,26 +84,138 @@ def description(self) -> Optional[str]: return pulumi.get(self, "description") +@pulumi.output_type +class DataExchangeSharingEnvironmentConfig(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "dcrExchangeConfig": + suggest = "dcr_exchange_config" + elif key == "defaultExchangeConfig": + suggest = "default_exchange_config" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in DataExchangeSharingEnvironmentConfig. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + DataExchangeSharingEnvironmentConfig.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + DataExchangeSharingEnvironmentConfig.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + dcr_exchange_config: Optional['outputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfig'] = None, + default_exchange_config: Optional['outputs.DataExchangeSharingEnvironmentConfigDefaultExchangeConfig'] = None): + """ + :param 'DataExchangeSharingEnvironmentConfigDcrExchangeConfigArgs' dcr_exchange_config: Data Clean Room (DCR), used for privacy-safe and secured data sharing. + :param 'DataExchangeSharingEnvironmentConfigDefaultExchangeConfigArgs' default_exchange_config: Default Analytics Hub data exchange, used for secured data sharing. + """ + if dcr_exchange_config is not None: + pulumi.set(__self__, "dcr_exchange_config", dcr_exchange_config) + if default_exchange_config is not None: + pulumi.set(__self__, "default_exchange_config", default_exchange_config) + + @property + @pulumi.getter(name="dcrExchangeConfig") + def dcr_exchange_config(self) -> Optional['outputs.DataExchangeSharingEnvironmentConfigDcrExchangeConfig']: + """ + Data Clean Room (DCR), used for privacy-safe and secured data sharing. + """ + return pulumi.get(self, "dcr_exchange_config") + + @property + @pulumi.getter(name="defaultExchangeConfig") + def default_exchange_config(self) -> Optional['outputs.DataExchangeSharingEnvironmentConfigDefaultExchangeConfig']: + """ + Default Analytics Hub data exchange, used for secured data sharing. + """ + return pulumi.get(self, "default_exchange_config") + + +@pulumi.output_type +class DataExchangeSharingEnvironmentConfigDcrExchangeConfig(dict): + def __init__(__self__): + pass + + +@pulumi.output_type +class DataExchangeSharingEnvironmentConfigDefaultExchangeConfig(dict): + def __init__(__self__): + pass + + @pulumi.output_type class ListingBigqueryDataset(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "selectedResources": + suggest = "selected_resources" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ListingBigqueryDataset. 
Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ListingBigqueryDataset.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ListingBigqueryDataset.__key_warning(key) + return super().get(key, default) + def __init__(__self__, *, - dataset: str): + dataset: str, + selected_resources: Optional[Sequence['outputs.ListingBigqueryDatasetSelectedResource']] = None): """ :param str dataset: Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 - - - - - + :param Sequence['ListingBigqueryDatasetSelectedResourceArgs'] selected_resources: Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + Structure is documented below. """ pulumi.set(__self__, "dataset", dataset) + if selected_resources is not None: + pulumi.set(__self__, "selected_resources", selected_resources) @property @pulumi.getter def dataset(self) -> str: """ Resource name of the dataset source for this listing. e.g. projects/myproject/datasets/123 + """ + return pulumi.get(self, "dataset") + + @property + @pulumi.getter(name="selectedResources") + def selected_resources(self) -> Optional[Sequence['outputs.ListingBigqueryDatasetSelectedResource']]: + """ + Resource in this dataset that is selectively shared. This field is required for data clean room exchanges. + Structure is documented below. + """ + return pulumi.get(self, "selected_resources") + + +@pulumi.output_type +class ListingBigqueryDatasetSelectedResource(dict): + def __init__(__self__, *, + table: Optional[str] = None): + """ + :param str table: Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" + + - - - + """ + if table is not None: + pulumi.set(__self__, "table", table) + + @property + @pulumi.getter + def table(self) -> Optional[str]: + """ + Format: For table: projects/{projectId}/datasets/{datasetId}/tables/{tableId} Example:"projects/test_project/datasets/test_dataset/tables/test_table" - - - """ - return pulumi.get(self, "dataset") + return pulumi.get(self, "table") @pulumi.output_type @@ -254,7 +371,9 @@ class ListingRestrictedExportConfig(dict): @staticmethod def __key_warning(key: str): suggest = None - if key == "restrictQueryResult": + if key == "restrictDirectTableAccess": + suggest = "restrict_direct_table_access" + elif key == "restrictQueryResult": suggest = "restrict_query_result" if suggest: @@ -270,13 +389,18 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, enabled: Optional[bool] = None, + restrict_direct_table_access: Optional[bool] = None, restrict_query_result: Optional[bool] = None): """ :param bool enabled: If true, enable restricted export. + :param bool restrict_direct_table_access: (Output) + If true, restrict direct table access(read api/tabledata.list) on linked table. :param bool restrict_query_result: If true, restrict export of query result derived from restricted linked dataset table. 
""" if enabled is not None: pulumi.set(__self__, "enabled", enabled) + if restrict_direct_table_access is not None: + pulumi.set(__self__, "restrict_direct_table_access", restrict_direct_table_access) if restrict_query_result is not None: pulumi.set(__self__, "restrict_query_result", restrict_query_result) @@ -288,6 +412,15 @@ def enabled(self) -> Optional[bool]: """ return pulumi.get(self, "enabled") + @property + @pulumi.getter(name="restrictDirectTableAccess") + def restrict_direct_table_access(self) -> Optional[bool]: + """ + (Output) + If true, restrict direct table access(read api/tabledata.list) on linked table. + """ + return pulumi.get(self, "restrict_direct_table_access") + @property @pulumi.getter(name="restrictQueryResult") def restrict_query_result(self) -> Optional[bool]: diff --git a/sdk/python/pulumi_gcp/bigtable/_inputs.py b/sdk/python/pulumi_gcp/bigtable/_inputs.py index e75f6ed8fb..3105cec8f4 100644 --- a/sdk/python/pulumi_gcp/bigtable/_inputs.py +++ b/sdk/python/pulumi_gcp/bigtable/_inputs.py @@ -714,17 +714,25 @@ class TableColumnFamilyArgsDict(TypedDict): """ The name of the column family. """ + type: NotRequired[pulumi.Input[str]] + """ + The type of the column family. + """ elif False: TableColumnFamilyArgsDict: TypeAlias = Mapping[str, Any] @pulumi.input_type class TableColumnFamilyArgs: def __init__(__self__, *, - family: pulumi.Input[str]): + family: pulumi.Input[str], + type: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] family: The name of the column family. + :param pulumi.Input[str] type: The type of the column family. """ pulumi.set(__self__, "family", family) + if type is not None: + pulumi.set(__self__, "type", type) @property @pulumi.getter @@ -738,6 +746,18 @@ def family(self) -> pulumi.Input[str]: def family(self, value: pulumi.Input[str]): pulumi.set(self, "family", value) + @property + @pulumi.getter + def type(self) -> Optional[pulumi.Input[str]]: + """ + The type of the column family. + """ + return pulumi.get(self, "type") + + @type.setter + def type(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "type", value) + if not MYPY: class TableIamBindingConditionArgsDict(TypedDict): diff --git a/sdk/python/pulumi_gcp/bigtable/outputs.py b/sdk/python/pulumi_gcp/bigtable/outputs.py index 6f77774867..145a0533c6 100644 --- a/sdk/python/pulumi_gcp/bigtable/outputs.py +++ b/sdk/python/pulumi_gcp/bigtable/outputs.py @@ -532,11 +532,15 @@ def retention_period(self) -> Optional[str]: @pulumi.output_type class TableColumnFamily(dict): def __init__(__self__, *, - family: str): + family: str, + type: Optional[str] = None): """ :param str family: The name of the column family. + :param str type: The type of the column family. """ pulumi.set(__self__, "family", family) + if type is not None: + pulumi.set(__self__, "type", type) @property @pulumi.getter @@ -546,6 +550,14 @@ def family(self) -> str: """ return pulumi.get(self, "family") + @property + @pulumi.getter + def type(self) -> Optional[str]: + """ + The type of the column family. 
+ """ + return pulumi.get(self, "type") + @pulumi.output_type class TableIamBindingCondition(dict): diff --git a/sdk/python/pulumi_gcp/bigtable/table.py b/sdk/python/pulumi_gcp/bigtable/table.py index f5bb2dc427..718478e505 100644 --- a/sdk/python/pulumi_gcp/bigtable/table.py +++ b/sdk/python/pulumi_gcp/bigtable/table.py @@ -356,6 +356,23 @@ def __init__(__self__, }, { "family": "family-second", + "type": "intsum", + }, + { + "family": "family-third", + "type": \"\"\" { + \\x09\\x09\\x09\\x09\\x09"aggregateType": { + \\x09\\x09\\x09\\x09\\x09\\x09"max": {}, + \\x09\\x09\\x09\\x09\\x09\\x09"inputType": { + \\x09\\x09\\x09\\x09\\x09\\x09\\x09"int64Type": { + \\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09"encoding": { + \\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09"bigEndianBytes": {} + \\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09} + \\x09\\x09\\x09\\x09\\x09\\x09\\x09} + \\x09\\x09\\x09\\x09\\x09\\x09} + \\x09\\x09\\x09\\x09\\x09} + \\x09\\x09\\x09\\x09} + \"\"\", }, ], change_stream_retention="24h0m0s", @@ -446,6 +463,23 @@ def __init__(__self__, }, { "family": "family-second", + "type": "intsum", + }, + { + "family": "family-third", + "type": \"\"\" { + \\x09\\x09\\x09\\x09\\x09"aggregateType": { + \\x09\\x09\\x09\\x09\\x09\\x09"max": {}, + \\x09\\x09\\x09\\x09\\x09\\x09"inputType": { + \\x09\\x09\\x09\\x09\\x09\\x09\\x09"int64Type": { + \\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09"encoding": { + \\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09"bigEndianBytes": {} + \\x09\\x09\\x09\\x09\\x09\\x09\\x09\\x09} + \\x09\\x09\\x09\\x09\\x09\\x09\\x09} + \\x09\\x09\\x09\\x09\\x09\\x09} + \\x09\\x09\\x09\\x09\\x09} + \\x09\\x09\\x09\\x09} + \"\"\", }, ], change_stream_retention="24h0m0s", diff --git a/sdk/python/pulumi_gcp/certificateauthority/authority.py b/sdk/python/pulumi_gcp/certificateauthority/authority.py index 943b4d449e..0beb9d3508 100644 --- a/sdk/python/pulumi_gcp/certificateauthority/authority.py +++ b/sdk/python/pulumi_gcp/certificateauthority/authority.py @@ -49,7 +49,8 @@ def __init__(__self__, *, :param pulumi.Input[str] location: Location of the CertificateAuthority. A full list of valid locations can be found by running `gcloud privateca locations list`. :param pulumi.Input[str] pool: The name of the CaPool this Certificate Authority belongs to. - :param pulumi.Input[str] desired_state: Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + :param pulumi.Input[str] desired_state: Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + ENABLED, DISABLED, STAGED. :param pulumi.Input[str] gcs_bucket: The name of a Cloud Storage bucket where this CertificateAuthority will publish content, such as the CA certificate and CRLs. This must be a bucket name, without any prefixes (such as 'gs://') or suffixes (such as '.googleapis.com'). For example, to use a bucket named my-bucket, you would simply specify 'my-bucket'. If not specified, a managed bucket will @@ -177,7 +178,8 @@ def deletion_protection(self, value: Optional[pulumi.Input[bool]]): @pulumi.getter(name="desiredState") def desired_state(self) -> Optional[pulumi.Input[str]]: """ - Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + ENABLED, DISABLED, STAGED. 
""" return pulumi.get(self, "desired_state") @@ -340,7 +342,8 @@ def __init__(__self__, *, :param pulumi.Input[str] create_time: The time at which this CertificateAuthority was created. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". - :param pulumi.Input[str] desired_state: Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + :param pulumi.Input[str] desired_state: Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + ENABLED, DISABLED, STAGED. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. :param pulumi.Input[str] gcs_bucket: The name of a Cloud Storage bucket where this CertificateAuthority will publish content, such as the CA certificate and CRLs. This must be a bucket name, without any prefixes (such as 'gs://') or suffixes (such as '.googleapis.com'). For @@ -496,7 +499,8 @@ def deletion_protection(self, value: Optional[pulumi.Input[bool]]): @pulumi.getter(name="desiredState") def desired_state(self) -> Optional[pulumi.Input[str]]: """ - Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + ENABLED, DISABLED, STAGED. """ return pulumi.get(self, "desired_state") @@ -1069,7 +1073,8 @@ def __init__(__self__, :param pulumi.Input[str] certificate_authority_id: The user provided Resource ID for this Certificate Authority. :param pulumi.Input[Union['AuthorityConfigArgs', 'AuthorityConfigArgsDict']] config: The config used to create a self-signed X.509 certificate or CSR. Structure is documented below. - :param pulumi.Input[str] desired_state: Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + :param pulumi.Input[str] desired_state: Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + ENABLED, DISABLED, STAGED. :param pulumi.Input[str] gcs_bucket: The name of a Cloud Storage bucket where this CertificateAuthority will publish content, such as the CA certificate and CRLs. This must be a bucket name, without any prefixes (such as 'gs://') or suffixes (such as '.googleapis.com'). For example, to use a bucket named my-bucket, you would simply specify 'my-bucket'. If not specified, a managed bucket will @@ -1530,7 +1535,8 @@ def get(resource_name: str, :param pulumi.Input[str] create_time: The time at which this CertificateAuthority was created. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". - :param pulumi.Input[str] desired_state: Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + :param pulumi.Input[str] desired_state: Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + ENABLED, DISABLED, STAGED. 
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. :param pulumi.Input[str] gcs_bucket: The name of a Cloud Storage bucket where this CertificateAuthority will publish content, such as the CA certificate and CRLs. This must be a bucket name, without any prefixes (such as 'gs://') or suffixes (such as '.googleapis.com'). For @@ -1647,7 +1653,8 @@ def deletion_protection(self) -> pulumi.Output[Optional[bool]]: @pulumi.getter(name="desiredState") def desired_state(self) -> pulumi.Output[Optional[str]]: """ - Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. + Desired state of the CertificateAuthority. Set this field to 'STAGED' to create a 'STAGED' root CA. Possible values: + ENABLED, DISABLED, STAGED. """ return pulumi.get(self, "desired_state") diff --git a/sdk/python/pulumi_gcp/certificatemanager/__init__.py b/sdk/python/pulumi_gcp/certificatemanager/__init__.py index d7cc3237f3..1224a92249 100644 --- a/sdk/python/pulumi_gcp/certificatemanager/__init__.py +++ b/sdk/python/pulumi_gcp/certificatemanager/__init__.py @@ -11,6 +11,7 @@ from .certificate_map_entry import * from .dns_authorization import * from .get_certificate_map import * +from .get_certificates import * from .trust_config import * from ._inputs import * from . import outputs diff --git a/sdk/python/pulumi_gcp/certificatemanager/certificate.py b/sdk/python/pulumi_gcp/certificatemanager/certificate.py index dc637a23c1..068a8f073b 100644 --- a/sdk/python/pulumi_gcp/certificatemanager/certificate.py +++ b/sdk/python/pulumi_gcp/certificatemanager/certificate.py @@ -205,6 +205,7 @@ def __init__(__self__, *, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + san_dnsnames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, scope: Optional[pulumi.Input[str]] = None, self_managed: Optional[pulumi.Input['CertificateSelfManagedArgs']] = None): """ @@ -229,6 +230,7 @@ def __init__(__self__, *, If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource and default labels configured on the provider. + :param pulumi.Input[Sequence[pulumi.Input[str]]] san_dnsnames: The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) :param pulumi.Input[str] scope: The scope of the certificate. DEFAULT: Certificates with default scope are served from core Google data centers. If unsure, choose this option. 
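The `desired_state` documentation above now spells out the accepted values (ENABLED, DISABLED, STAGED). A hedged sketch of creating a STAGED root CA; the CA pool is assumed to already exist, all names are placeholders, and the nested config follows the usual `certificateauthority.Authority` shape rather than anything introduced by this patch:

```python
import pulumi
import pulumi_gcp as gcp

staged_ca = gcp.certificateauthority.Authority(
    "staged-root",
    pool="my-ca-pool",                       # pre-existing CA pool (placeholder)
    certificate_authority_id="staged-root-ca",
    location="us-central1",
    deletion_protection=False,
    desired_state="STAGED",                  # one of ENABLED, DISABLED, STAGED
    config={
        "subject_config": {
            "subject": {
                "organization": "Example Org",
                "common_name": "staged-root-ca",
            },
        },
        "x509_config": {
            "ca_options": {"is_ca": True},
            "key_usage": {
                "base_key_usage": {
                    "cert_sign": True,
                    "crl_sign": True,
                },
                "extended_key_usage": {},
            },
        },
    },
    key_spec={"algorithm": "RSA_PKCS1_4096_SHA256"})
```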
@@ -257,6 +259,8 @@ def __init__(__self__, *, pulumi.set(__self__, "project", project) if pulumi_labels is not None: pulumi.set(__self__, "pulumi_labels", pulumi_labels) + if san_dnsnames is not None: + pulumi.set(__self__, "san_dnsnames", san_dnsnames) if scope is not None: pulumi.set(__self__, "scope", scope) if self_managed is not None: @@ -370,6 +374,18 @@ def pulumi_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]] def pulumi_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "pulumi_labels", value) + @property + @pulumi.getter(name="sanDnsnames") + def san_dnsnames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + """ + The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + """ + return pulumi.get(self, "san_dnsnames") + + @san_dnsnames.setter + def san_dnsnames(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "san_dnsnames", value) + @property @pulumi.getter def scope(self) -> Optional[pulumi.Input[str]]: @@ -1077,6 +1093,7 @@ def _internal_init(__self__, __props__.__dict__["self_managed"] = self_managed __props__.__dict__["effective_labels"] = None __props__.__dict__["pulumi_labels"] = None + __props__.__dict__["san_dnsnames"] = None secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["effectiveLabels", "pulumiLabels"]) opts = pulumi.ResourceOptions.merge(opts, secret_opts) super(Certificate, __self__).__init__( @@ -1097,6 +1114,7 @@ def get(resource_name: str, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + san_dnsnames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, scope: Optional[pulumi.Input[str]] = None, self_managed: Optional[pulumi.Input[Union['CertificateSelfManagedArgs', 'CertificateSelfManagedArgsDict']]] = None) -> 'Certificate': """ @@ -1126,6 +1144,7 @@ def get(resource_name: str, If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource and default labels configured on the provider. + :param pulumi.Input[Sequence[pulumi.Input[str]]] san_dnsnames: The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) :param pulumi.Input[str] scope: The scope of the certificate. DEFAULT: Certificates with default scope are served from core Google data centers. If unsure, choose this option. 
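The read-only `san_dnsnames` output added to `certificatemanager.Certificate` above can simply be exported once a managed certificate exists. A minimal sketch; the domain names are placeholders:

```python
import pulumi
import pulumi_gcp as gcp

cert = gcp.certificatemanager.Certificate(
    "managed-cert",
    name="managed-cert",
    description="Example managed certificate",
    managed={
        "domains": ["example.com", "www.example.com"],
    })

# New output: the SAN dnsNames present in the issued certificate.
pulumi.export("sanDnsnames", cert.san_dnsnames)
```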
@@ -1150,6 +1169,7 @@ def get(resource_name: str, __props__.__dict__["name"] = name __props__.__dict__["project"] = project __props__.__dict__["pulumi_labels"] = pulumi_labels + __props__.__dict__["san_dnsnames"] = san_dnsnames __props__.__dict__["scope"] = scope __props__.__dict__["self_managed"] = self_managed return Certificate(resource_name, opts=opts, __props__=__props__) @@ -1230,6 +1250,14 @@ def pulumi_labels(self) -> pulumi.Output[Mapping[str, str]]: """ return pulumi.get(self, "pulumi_labels") + @property + @pulumi.getter(name="sanDnsnames") + def san_dnsnames(self) -> pulumi.Output[Sequence[str]]: + """ + The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + """ + return pulumi.get(self, "san_dnsnames") + @property @pulumi.getter def scope(self) -> pulumi.Output[Optional[str]]: diff --git a/sdk/python/pulumi_gcp/certificatemanager/get_certificates.py b/sdk/python/pulumi_gcp/certificatemanager/get_certificates.py new file mode 100644 index 0000000000..7c512dd447 --- /dev/null +++ b/sdk/python/pulumi_gcp/certificatemanager/get_certificates.py @@ -0,0 +1,150 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import sys +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +if sys.version_info >= (3, 11): + from typing import NotRequired, TypedDict, TypeAlias +else: + from typing_extensions import NotRequired, TypedDict, TypeAlias +from .. import _utilities +from . import outputs + +__all__ = [ + 'GetCertificatesResult', + 'AwaitableGetCertificatesResult', + 'get_certificates', + 'get_certificates_output', +] + +@pulumi.output_type +class GetCertificatesResult: + """ + A collection of values returned by getCertificates. + """ + def __init__(__self__, certificates=None, filter=None, id=None, region=None): + if certificates and not isinstance(certificates, list): + raise TypeError("Expected argument 'certificates' to be a list") + pulumi.set(__self__, "certificates", certificates) + if filter and not isinstance(filter, str): + raise TypeError("Expected argument 'filter' to be a str") + pulumi.set(__self__, "filter", filter) + if id and not isinstance(id, str): + raise TypeError("Expected argument 'id' to be a str") + pulumi.set(__self__, "id", id) + if region and not isinstance(region, str): + raise TypeError("Expected argument 'region' to be a str") + pulumi.set(__self__, "region", region) + + @property + @pulumi.getter + def certificates(self) -> Sequence['outputs.GetCertificatesCertificateResult']: + return pulumi.get(self, "certificates") + + @property + @pulumi.getter + def filter(self) -> Optional[str]: + return pulumi.get(self, "filter") + + @property + @pulumi.getter + def id(self) -> str: + """ + The provider-assigned unique ID for this managed resource. 
+ """ + return pulumi.get(self, "id") + + @property + @pulumi.getter + def region(self) -> Optional[str]: + return pulumi.get(self, "region") + + +class AwaitableGetCertificatesResult(GetCertificatesResult): + # pylint: disable=using-constant-test + def __await__(self): + if False: + yield self + return GetCertificatesResult( + certificates=self.certificates, + filter=self.filter, + id=self.id, + region=self.region) + + +def get_certificates(filter: Optional[str] = None, + region: Optional[str] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificatesResult: + """ + List all certificates within Google Certificate Manager for a given project, region or filter. + + ## Example Usage + + ```python + import pulumi + import pulumi_gcp as gcp + + default = gcp.certificatemanager.get_certificates() + ``` + + ### With A Filter + + ```python + import pulumi + import pulumi_gcp as gcp + + default = gcp.certificatemanager.get_certificates(filter="name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*") + ``` + + + :param str filter: Filter expression to restrict the certificates returned. + :param str region: The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + """ + __args__ = dict() + __args__['filter'] = filter + __args__['region'] = region + opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) + __ret__ = pulumi.runtime.invoke('gcp:certificatemanager/getCertificates:getCertificates', __args__, opts=opts, typ=GetCertificatesResult).value + + return AwaitableGetCertificatesResult( + certificates=pulumi.get(__ret__, 'certificates'), + filter=pulumi.get(__ret__, 'filter'), + id=pulumi.get(__ret__, 'id'), + region=pulumi.get(__ret__, 'region')) + + +@_utilities.lift_output_func(get_certificates) +def get_certificates_output(filter: Optional[pulumi.Input[Optional[str]]] = None, + region: Optional[pulumi.Input[Optional[str]]] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCertificatesResult]: + """ + List all certificates within Google Certificate Manager for a given project, region or filter. + + ## Example Usage + + ```python + import pulumi + import pulumi_gcp as gcp + + default = gcp.certificatemanager.get_certificates() + ``` + + ### With A Filter + + ```python + import pulumi + import pulumi_gcp as gcp + + default = gcp.certificatemanager.get_certificates(filter="name:projects/PROJECT_ID/locations/REGION/certificates/certificate-name-*") + ``` + + + :param str filter: Filter expression to restrict the certificates returned. + :param str region: The region in which the resource belongs. If it is not provided, `GLOBAL` is used. + """ + ... 
diff --git a/sdk/python/pulumi_gcp/certificatemanager/outputs.py b/sdk/python/pulumi_gcp/certificatemanager/outputs.py index 8fefebfdcc..e18265f38a 100644 --- a/sdk/python/pulumi_gcp/certificatemanager/outputs.py +++ b/sdk/python/pulumi_gcp/certificatemanager/outputs.py @@ -31,6 +31,10 @@ 'TrustConfigTrustStoreTrustAnchor', 'GetCertificateMapGclbTargetResult', 'GetCertificateMapGclbTargetIpConfigResult', + 'GetCertificatesCertificateResult', + 'GetCertificatesCertificateManagedResult', + 'GetCertificatesCertificateManagedAuthorizationAttemptInfoResult', + 'GetCertificatesCertificateManagedProvisioningIssueResult', ] @pulumi.output_type @@ -892,3 +896,321 @@ def ports(self) -> Sequence[int]: return pulumi.get(self, "ports") +@pulumi.output_type +class GetCertificatesCertificateResult(dict): + def __init__(__self__, *, + description: str, + effective_labels: Mapping[str, str], + labels: Mapping[str, str], + location: str, + manageds: Sequence['outputs.GetCertificatesCertificateManagedResult'], + name: str, + project: str, + pulumi_labels: Mapping[str, str], + san_dnsnames: Sequence[str], + scope: str): + """ + :param str description: A human-readable description of the resource. + :param Mapping[str, str] labels: Set of label tags associated with the Certificate resource. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field 'effective_labels' for all of the labels present on the resource. + :param str location: The Certificate Manager location. If not specified, "global" is used. + :param Sequence['GetCertificatesCertificateManagedArgs'] manageds: Configuration and state of a Managed Certificate. + Certificate Manager provisions and renews Managed Certificates + automatically, for as long as it's authorized to do so. + :param str name: A user-defined name of the certificate. Certificate names must be unique + The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, + and all following characters must be a dash, underscore, letter or digit. + :param str project: The ID of the project in which the resource belongs. If it + is not provided, the provider project is used. + :param Mapping[str, str] pulumi_labels: The combination of labels configured directly on the resource + and default labels configured on the provider. + :param Sequence[str] san_dnsnames: The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + :param str scope: The scope of the certificate. + + DEFAULT: Certificates with default scope are served from core Google data centers. + If unsure, choose this option. + + EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. + See https://cloud.google.com/vpc/docs/edge-locations. + + ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). 
+ See https://cloud.google.com/compute/docs/regions-zones + """ + pulumi.set(__self__, "description", description) + pulumi.set(__self__, "effective_labels", effective_labels) + pulumi.set(__self__, "labels", labels) + pulumi.set(__self__, "location", location) + pulumi.set(__self__, "manageds", manageds) + pulumi.set(__self__, "name", name) + pulumi.set(__self__, "project", project) + pulumi.set(__self__, "pulumi_labels", pulumi_labels) + pulumi.set(__self__, "san_dnsnames", san_dnsnames) + pulumi.set(__self__, "scope", scope) + + @property + @pulumi.getter + def description(self) -> str: + """ + A human-readable description of the resource. + """ + return pulumi.get(self, "description") + + @property + @pulumi.getter(name="effectiveLabels") + def effective_labels(self) -> Mapping[str, str]: + return pulumi.get(self, "effective_labels") + + @property + @pulumi.getter + def labels(self) -> Mapping[str, str]: + """ + Set of label tags associated with the Certificate resource. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field 'effective_labels' for all of the labels present on the resource. + """ + return pulumi.get(self, "labels") + + @property + @pulumi.getter + def location(self) -> str: + """ + The Certificate Manager location. If not specified, "global" is used. + """ + return pulumi.get(self, "location") + + @property + @pulumi.getter + def manageds(self) -> Sequence['outputs.GetCertificatesCertificateManagedResult']: + """ + Configuration and state of a Managed Certificate. + Certificate Manager provisions and renews Managed Certificates + automatically, for as long as it's authorized to do so. + """ + return pulumi.get(self, "manageds") + + @property + @pulumi.getter + def name(self) -> str: + """ + A user-defined name of the certificate. Certificate names must be unique + The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, + and all following characters must be a dash, underscore, letter or digit. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter + def project(self) -> str: + """ + The ID of the project in which the resource belongs. If it + is not provided, the provider project is used. + """ + return pulumi.get(self, "project") + + @property + @pulumi.getter(name="pulumiLabels") + def pulumi_labels(self) -> Mapping[str, str]: + """ + The combination of labels configured directly on the resource + and default labels configured on the provider. + """ + return pulumi.get(self, "pulumi_labels") + + @property + @pulumi.getter(name="sanDnsnames") + def san_dnsnames(self) -> Sequence[str]: + """ + The list of Subject Alternative Names of dnsName type defined in the certificate (see RFC 5280 4.2.1.6) + """ + return pulumi.get(self, "san_dnsnames") + + @property + @pulumi.getter + def scope(self) -> str: + """ + The scope of the certificate. + + DEFAULT: Certificates with default scope are served from core Google data centers. + If unsure, choose this option. + + EDGE_CACHE: Certificates with scope EDGE_CACHE are special-purposed certificates, served from Edge Points of Presence. + See https://cloud.google.com/vpc/docs/edge-locations. + + ALL_REGIONS: Certificates with ALL_REGIONS scope are served from all GCP regions (You can only use ALL_REGIONS with global certs). 
+ See https://cloud.google.com/compute/docs/regions-zones + """ + return pulumi.get(self, "scope") + + +@pulumi.output_type +class GetCertificatesCertificateManagedResult(dict): + def __init__(__self__, *, + authorization_attempt_infos: Sequence['outputs.GetCertificatesCertificateManagedAuthorizationAttemptInfoResult'], + dns_authorizations: Sequence[str], + domains: Sequence[str], + issuance_config: str, + provisioning_issues: Sequence['outputs.GetCertificatesCertificateManagedProvisioningIssueResult'], + state: str): + """ + :param Sequence['GetCertificatesCertificateManagedAuthorizationAttemptInfoArgs'] authorization_attempt_infos: Detailed state of the latest authorization attempt for each domain + specified for this Managed Certificate. + :param Sequence[str] dns_authorizations: Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + :param Sequence[str] domains: The domains for which a managed SSL certificate will be generated. + Wildcard domains are only supported with DNS challenge resolution + :param str issuance_config: The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. + If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. + Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + :param Sequence['GetCertificatesCertificateManagedProvisioningIssueArgs'] provisioning_issues: Information about issues with provisioning this Managed Certificate. + :param str state: A state of this Managed Certificate. + """ + pulumi.set(__self__, "authorization_attempt_infos", authorization_attempt_infos) + pulumi.set(__self__, "dns_authorizations", dns_authorizations) + pulumi.set(__self__, "domains", domains) + pulumi.set(__self__, "issuance_config", issuance_config) + pulumi.set(__self__, "provisioning_issues", provisioning_issues) + pulumi.set(__self__, "state", state) + + @property + @pulumi.getter(name="authorizationAttemptInfos") + def authorization_attempt_infos(self) -> Sequence['outputs.GetCertificatesCertificateManagedAuthorizationAttemptInfoResult']: + """ + Detailed state of the latest authorization attempt for each domain + specified for this Managed Certificate. + """ + return pulumi.get(self, "authorization_attempt_infos") + + @property + @pulumi.getter(name="dnsAuthorizations") + def dns_authorizations(self) -> Sequence[str]: + """ + Authorizations that will be used for performing domain authorization. Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + """ + return pulumi.get(self, "dns_authorizations") + + @property + @pulumi.getter + def domains(self) -> Sequence[str]: + """ + The domains for which a managed SSL certificate will be generated. + Wildcard domains are only supported with DNS challenge resolution + """ + return pulumi.get(self, "domains") + + @property + @pulumi.getter(name="issuanceConfig") + def issuance_config(self) -> str: + """ + The resource name for a CertificateIssuanceConfig used to configure private PKI certificates in the format projects/*/locations/*/certificateIssuanceConfigs/*. + If this field is not set, the certificates will instead be publicly signed as documented at https://cloud.google.com/load-balancing/docs/ssl-certificates/google-managed-certs#caa. 
+ Either issuanceConfig or dnsAuthorizations should be specificed, but not both. + """ + return pulumi.get(self, "issuance_config") + + @property + @pulumi.getter(name="provisioningIssues") + def provisioning_issues(self) -> Sequence['outputs.GetCertificatesCertificateManagedProvisioningIssueResult']: + """ + Information about issues with provisioning this Managed Certificate. + """ + return pulumi.get(self, "provisioning_issues") + + @property + @pulumi.getter + def state(self) -> str: + """ + A state of this Managed Certificate. + """ + return pulumi.get(self, "state") + + +@pulumi.output_type +class GetCertificatesCertificateManagedAuthorizationAttemptInfoResult(dict): + def __init__(__self__, *, + details: str, + domain: str, + failure_reason: str, + state: str): + """ + :param str details: Human readable explanation for reaching the state. Provided to help + address the configuration issues. + Not guaranteed to be stable. For programmatic access use 'failure_reason' field. + :param str domain: Domain name of the authorization attempt. + :param str failure_reason: Reason for failure of the authorization attempt for the domain. + :param str state: State of the domain for managed certificate issuance. + """ + pulumi.set(__self__, "details", details) + pulumi.set(__self__, "domain", domain) + pulumi.set(__self__, "failure_reason", failure_reason) + pulumi.set(__self__, "state", state) + + @property + @pulumi.getter + def details(self) -> str: + """ + Human readable explanation for reaching the state. Provided to help + address the configuration issues. + Not guaranteed to be stable. For programmatic access use 'failure_reason' field. + """ + return pulumi.get(self, "details") + + @property + @pulumi.getter + def domain(self) -> str: + """ + Domain name of the authorization attempt. + """ + return pulumi.get(self, "domain") + + @property + @pulumi.getter(name="failureReason") + def failure_reason(self) -> str: + """ + Reason for failure of the authorization attempt for the domain. + """ + return pulumi.get(self, "failure_reason") + + @property + @pulumi.getter + def state(self) -> str: + """ + State of the domain for managed certificate issuance. + """ + return pulumi.get(self, "state") + + +@pulumi.output_type +class GetCertificatesCertificateManagedProvisioningIssueResult(dict): + def __init__(__self__, *, + details: str, + reason: str): + """ + :param str details: Human readable explanation about the issue. Provided to help address + the configuration issues. + Not guaranteed to be stable. For programmatic access use 'reason' field. + :param str reason: Reason for provisioning failures. + """ + pulumi.set(__self__, "details", details) + pulumi.set(__self__, "reason", reason) + + @property + @pulumi.getter + def details(self) -> str: + """ + Human readable explanation about the issue. Provided to help address + the configuration issues. + Not guaranteed to be stable. For programmatic access use 'reason' field. + """ + return pulumi.get(self, "details") + + @property + @pulumi.getter + def reason(self) -> str: + """ + Reason for provisioning failures. 
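The nested `manageds`, `authorization_attempt_infos`, and `provisioning_issues` result types above can be used to surface issuance status from the same data source, for example:

```python
import pulumi
import pulumi_gcp as gcp

certs = gcp.certificatemanager.get_certificates()

# Flatten managed-certificate state and any provisioning issues per certificate.
status = [
    {
        "certificate": c.name,
        "state": m.state,
        "failed_domains": [
            a.domain for a in m.authorization_attempt_infos if a.failure_reason
        ],
        "provisioning_issues": [i.reason for i in m.provisioning_issues],
    }
    for c in certs.certificates
    for m in c.manageds
]

pulumi.export("managedCertificateStatus", status)
```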
+ """ + return pulumi.get(self, "reason") + + diff --git a/sdk/python/pulumi_gcp/cloudbuild/_inputs.py b/sdk/python/pulumi_gcp/cloudbuild/_inputs.py index b189a56f1a..eceb267dac 100644 --- a/sdk/python/pulumi_gcp/cloudbuild/_inputs.py +++ b/sdk/python/pulumi_gcp/cloudbuild/_inputs.py @@ -3961,11 +3961,11 @@ def peered_network_ip_range(self, value: Optional[pulumi.Input[str]]): class WorkerPoolWorkerConfigArgsDict(TypedDict): disk_size_gb: NotRequired[pulumi.Input[int]] """ - Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. """ machine_type: NotRequired[pulumi.Input[str]] """ - Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. """ no_external_ip: NotRequired[pulumi.Input[bool]] """ @@ -3981,8 +3981,8 @@ def __init__(__self__, *, machine_type: Optional[pulumi.Input[str]] = None, no_external_ip: Optional[pulumi.Input[bool]] = None): """ - :param pulumi.Input[int] disk_size_gb: Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. - :param pulumi.Input[str] machine_type: Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + :param pulumi.Input[int] disk_size_gb: Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + :param pulumi.Input[str] machine_type: Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. :param pulumi.Input[bool] no_external_ip: If true, workers are created without any public address, which prevents network egress to public IPs. """ if disk_size_gb is not None: @@ -3996,7 +3996,7 @@ def __init__(__self__, *, @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[pulumi.Input[int]]: """ - Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. 
""" return pulumi.get(self, "disk_size_gb") @@ -4008,7 +4008,7 @@ def disk_size_gb(self, value: Optional[pulumi.Input[int]]): @pulumi.getter(name="machineType") def machine_type(self) -> Optional[pulumi.Input[str]]: """ - Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. """ return pulumi.get(self, "machine_type") diff --git a/sdk/python/pulumi_gcp/cloudbuild/outputs.py b/sdk/python/pulumi_gcp/cloudbuild/outputs.py index 2ab99ba6a0..30da4e041b 100644 --- a/sdk/python/pulumi_gcp/cloudbuild/outputs.py +++ b/sdk/python/pulumi_gcp/cloudbuild/outputs.py @@ -3036,8 +3036,8 @@ def __init__(__self__, *, machine_type: Optional[str] = None, no_external_ip: Optional[bool] = None): """ - :param int disk_size_gb: Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. - :param str machine_type: Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + :param int disk_size_gb: Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + :param str machine_type: Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. :param bool no_external_ip: If true, workers are created without any public address, which prevents network egress to public IPs. """ if disk_size_gb is not None: @@ -3051,7 +3051,7 @@ def __init__(__self__, *, @pulumi.getter(name="diskSizeGb") def disk_size_gb(self) -> Optional[int]: """ - Size of the disk attached to the worker, in GB. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. + Size of the disk attached to the worker, in GB. See [diskSizeGb](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#disksizegb). Specify a value of up to 1000. If `0` is specified, Cloud Build will use a standard disk size. """ return pulumi.get(self, "disk_size_gb") @@ -3059,7 +3059,7 @@ def disk_size_gb(self) -> Optional[int]: @pulumi.getter(name="machineType") def machine_type(self) -> Optional[str]: """ - Machine type of a worker, such as `n1-standard-1`. See (https://cloud.google.com/cloud-build/docs/custom-workers/worker-pool-config-file). If left blank, Cloud Build will use `n1-standard-1`. + Machine type of a worker, such as `n1-standard-1`. See [machineType](https://cloud.google.com/build/docs/private-pools/private-pool-config-file-schema#machinetype). If left blank, Cloud Build will use `n1-standard-1`. 
""" return pulumi.get(self, "machine_type") diff --git a/sdk/python/pulumi_gcp/cloudrun/_inputs.py b/sdk/python/pulumi_gcp/cloudrun/_inputs.py index 21909e6d51..b868432e6e 100644 --- a/sdk/python/pulumi_gcp/cloudrun/_inputs.py +++ b/sdk/python/pulumi_gcp/cloudrun/_inputs.py @@ -3870,8 +3870,7 @@ class ServiceTemplateSpecVolumeArgsDict(TypedDict): nfs: NotRequired[pulumi.Input['ServiceTemplateSpecVolumeNfsArgsDict']] """ A filesystem backed by a Network File System share. This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Structure is documented below. """ secret: NotRequired[pulumi.Input['ServiceTemplateSpecVolumeSecretArgsDict']] @@ -3899,8 +3898,7 @@ def __init__(__self__, *, :param pulumi.Input['ServiceTemplateSpecVolumeEmptyDirArgs'] empty_dir: Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). Structure is documented below. :param pulumi.Input['ServiceTemplateSpecVolumeNfsArgs'] nfs: A filesystem backed by a Network File System share. This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Structure is documented below. :param pulumi.Input['ServiceTemplateSpecVolumeSecretArgs'] secret: The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of @@ -3960,8 +3958,7 @@ def empty_dir(self, value: Optional[pulumi.Input['ServiceTemplateSpecVolumeEmpty def nfs(self) -> Optional[pulumi.Input['ServiceTemplateSpecVolumeNfsArgs']]: """ A filesystem backed by a Network File System share. This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Structure is documented below. """ return pulumi.get(self, "nfs") @@ -3992,8 +3989,7 @@ class ServiceTemplateSpecVolumeCsiArgsDict(TypedDict): """ Unique name representing the type of file system to be created. Cloud Run supports the following values: * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" """ read_only: NotRequired[pulumi.Input[bool]] """ @@ -4017,8 +4013,7 @@ def __init__(__self__, *, """ :param pulumi.Input[str] driver: Unique name representing the type of file system to be created. Cloud Run supports the following values: * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". 
+ run.googleapis.com/execution-environment annotation to be unset or set to "gen2" :param pulumi.Input[bool] read_only: If true, all mounts created from this volume will be read-only. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] volume_attributes: Driver-specific attributes. The following options are supported for available drivers: * gcsfuse.run.googleapis.com @@ -4036,8 +4031,7 @@ def driver(self) -> pulumi.Input[str]: """ Unique name representing the type of file system to be created. Cloud Run supports the following values: * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" """ return pulumi.get(self, "driver") diff --git a/sdk/python/pulumi_gcp/cloudrun/outputs.py b/sdk/python/pulumi_gcp/cloudrun/outputs.py index 68bbfc6782..d98cb75e4e 100644 --- a/sdk/python/pulumi_gcp/cloudrun/outputs.py +++ b/sdk/python/pulumi_gcp/cloudrun/outputs.py @@ -2813,8 +2813,7 @@ def __init__(__self__, *, :param 'ServiceTemplateSpecVolumeEmptyDirArgs' empty_dir: Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). Structure is documented below. :param 'ServiceTemplateSpecVolumeNfsArgs' nfs: A filesystem backed by a Network File System share. This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Structure is documented below. :param 'ServiceTemplateSpecVolumeSecretArgs' secret: The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of @@ -2862,8 +2861,7 @@ def empty_dir(self) -> Optional['outputs.ServiceTemplateSpecVolumeEmptyDir']: def nfs(self) -> Optional['outputs.ServiceTemplateSpecVolumeNfs']: """ A filesystem backed by a Network File System share. This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" Structure is documented below. """ return pulumi.get(self, "nfs") @@ -2908,8 +2906,7 @@ def __init__(__self__, *, """ :param str driver: Unique name representing the type of file system to be created. Cloud Run supports the following values: * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" :param bool read_only: If true, all mounts created from this volume will be read-only. :param Mapping[str, str] volume_attributes: Driver-specific attributes. The following options are supported for available drivers: * gcsfuse.run.googleapis.com @@ -2927,8 +2924,7 @@ def driver(self) -> str: """ Unique name representing the type of file system to be created. 
Cloud Run supports the following values: * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" """ return pulumi.get(self, "driver") @@ -4812,8 +4808,7 @@ def __init__(__self__, *, :param Sequence['GetServiceTemplateSpecVolumeEmptyDirArgs'] empty_dirs: Ephemeral storage which can be backed by real disks (HD, SSD), network storage or memory (i.e. tmpfs). For now only in memory (tmpfs) is supported. It is ephemeral in the sense that when the sandbox is taken down, the data is destroyed with it (it does not persist across sandbox runs). :param str name: The name of the Cloud Run Service. :param Sequence['GetServiceTemplateSpecVolumeNfArgs'] nfs: A filesystem backed by a Network File System share. This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" :param Sequence['GetServiceTemplateSpecVolumeSecretArgs'] secrets: The secret's value will be presented as the content of a file whose name is defined in the item path. If no items are defined, the name of the file is the secret_name. @@ -4853,8 +4848,7 @@ def name(self) -> str: def nfs(self) -> Sequence['outputs.GetServiceTemplateSpecVolumeNfResult']: """ A filesystem backed by a Network File System share. This filesystem requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" """ return pulumi.get(self, "nfs") @@ -4878,8 +4872,7 @@ def __init__(__self__, *, """ :param str driver: Unique name representing the type of file system to be created. Cloud Run supports the following values: * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". + run.googleapis.com/execution-environment annotation to be unset or set to "gen2" :param bool read_only: If true, all mounts created from this volume will be read-only. :param Mapping[str, str] volume_attributes: Driver-specific attributes. The following options are supported for available drivers: * gcsfuse.run.googleapis.com @@ -4895,8 +4888,7 @@ def driver(self) -> str: """ Unique name representing the type of file system to be created. Cloud Run supports the following values: * gcsfuse.run.googleapis.com: Mount a Google Cloud Storage bucket using GCSFuse. This driver requires the - run.googleapis.com/execution-environment annotation to be set to "gen2" and - run.googleapis.com/launch-stage set to "BETA" or "ALPHA". 
+ run.googleapis.com/execution-environment annotation to be unset or set to "gen2" """ return pulumi.get(self, "driver") diff --git a/sdk/python/pulumi_gcp/cloudrunv2/_inputs.py b/sdk/python/pulumi_gcp/cloudrunv2/_inputs.py index 43f6f006bf..a0405ad109 100644 --- a/sdk/python/pulumi_gcp/cloudrunv2/_inputs.py +++ b/sdk/python/pulumi_gcp/cloudrunv2/_inputs.py @@ -111,6 +111,8 @@ 'ServiceTemplateContainerVolumeMountArgsDict', 'ServiceTemplateScalingArgs', 'ServiceTemplateScalingArgsDict', + 'ServiceTemplateServiceMeshArgs', + 'ServiceTemplateServiceMeshArgsDict', 'ServiceTemplateVolumeArgs', 'ServiceTemplateVolumeArgsDict', 'ServiceTemplateVolumeCloudSqlInstanceArgs', @@ -1427,12 +1429,12 @@ class JobTemplateTemplateVolumeArgsDict(TypedDict): """ gcs: NotRequired[pulumi.Input['JobTemplateTemplateVolumeGcsArgsDict']] """ - Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. Structure is documented below. """ nfs: NotRequired[pulumi.Input['JobTemplateTemplateVolumeNfsArgsDict']] """ - NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + NFS share mounted as a volume. Structure is documented below. """ secret: NotRequired[pulumi.Input['JobTemplateTemplateVolumeSecretArgsDict']] @@ -1458,9 +1460,9 @@ def __init__(__self__, *, Structure is documented below. :param pulumi.Input['JobTemplateTemplateVolumeEmptyDirArgs'] empty_dir: Ephemeral storage used as a shared volume. Structure is documented below. - :param pulumi.Input['JobTemplateTemplateVolumeGcsArgs'] gcs: Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + :param pulumi.Input['JobTemplateTemplateVolumeGcsArgs'] gcs: Cloud Storage bucket mounted as a volume using GCSFuse. Structure is documented below. - :param pulumi.Input['JobTemplateTemplateVolumeNfsArgs'] nfs: NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + :param pulumi.Input['JobTemplateTemplateVolumeNfsArgs'] nfs: NFS share mounted as a volume. Structure is documented below. :param pulumi.Input['JobTemplateTemplateVolumeSecretArgs'] secret: Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret Structure is documented below. @@ -1519,7 +1521,7 @@ def empty_dir(self, value: Optional[pulumi.Input['JobTemplateTemplateVolumeEmpty @pulumi.getter def gcs(self) -> Optional[pulumi.Input['JobTemplateTemplateVolumeGcsArgs']]: """ - Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. Structure is documented below. """ return pulumi.get(self, "gcs") @@ -1532,7 +1534,7 @@ def gcs(self, value: Optional[pulumi.Input['JobTemplateTemplateVolumeGcsArgs']]) @pulumi.getter def nfs(self) -> Optional[pulumi.Input['JobTemplateTemplateVolumeNfsArgs']]: """ - NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + NFS share mounted as a volume. Structure is documented below. """ return pulumi.get(self, "nfs") @@ -2723,6 +2725,11 @@ class ServiceTemplateArgsDict(TypedDict): """ Email address of the IAM service account associated with the revision of the service. 
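The classic Cloud Run (v1) doc changes above drop the BETA/ALPHA launch-stage requirement for GCSFuse and NFS volumes; the execution-environment annotation may now be left unset or set to "gen2". A sketch with a GCSFuse CSI volume; the bucket name is a placeholder and the `bucketName` attribute follows the documented GCSFuse driver convention rather than this patch:

```python
import pulumi
import pulumi_gcp as gcp

service = gcp.cloudrun.Service(
    "gcs-backed",
    name="gcs-backed",
    location="us-central1",
    template={
        "metadata": {
            "annotations": {
                # "gen2" (or unset) is sufficient; no launch-stage annotation needed.
                "run.googleapis.com/execution-environment": "gen2",
            },
        },
        "spec": {
            "containers": [{
                "image": "us-docker.pkg.dev/cloudrun/container/hello",
                "volume_mounts": [{
                    "name": "bucket",
                    "mount_path": "/mnt/bucket",
                }],
            }],
            "volumes": [{
                "name": "bucket",
                "csi": {
                    "driver": "gcsfuse.run.googleapis.com",
                    "volume_attributes": {
                        "bucketName": "my-bucket",
                    },
                },
            }],
        },
    })
```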
The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. """ + service_mesh: NotRequired[pulumi.Input['ServiceTemplateServiceMeshArgsDict']] + """ + Enables Cloud Service Mesh for this Revision. + Structure is documented below. + """ session_affinity: NotRequired[pulumi.Input[bool]] """ Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity @@ -2757,6 +2764,7 @@ def __init__(__self__, *, revision: Optional[pulumi.Input[str]] = None, scaling: Optional[pulumi.Input['ServiceTemplateScalingArgs']] = None, service_account: Optional[pulumi.Input[str]] = None, + service_mesh: Optional[pulumi.Input['ServiceTemplateServiceMeshArgs']] = None, session_affinity: Optional[pulumi.Input[bool]] = None, timeout: Optional[pulumi.Input[str]] = None, volumes: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateVolumeArgs']]]] = None, @@ -2781,6 +2789,8 @@ def __init__(__self__, *, :param pulumi.Input['ServiceTemplateScalingArgs'] scaling: Scaling settings for this Revision. Structure is documented below. :param pulumi.Input[str] service_account: Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. + :param pulumi.Input['ServiceTemplateServiceMeshArgs'] service_mesh: Enables Cloud Service Mesh for this Revision. + Structure is documented below. :param pulumi.Input[bool] session_affinity: Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity :param pulumi.Input[str] timeout: Max allowed time for an instance to respond to a request. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". @@ -2807,6 +2817,8 @@ def __init__(__self__, *, pulumi.set(__self__, "scaling", scaling) if service_account is not None: pulumi.set(__self__, "service_account", service_account) + if service_mesh is not None: + pulumi.set(__self__, "service_mesh", service_mesh) if session_affinity is not None: pulumi.set(__self__, "session_affinity", session_affinity) if timeout is not None: @@ -2934,6 +2946,19 @@ def service_account(self) -> Optional[pulumi.Input[str]]: def service_account(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service_account", value) + @property + @pulumi.getter(name="serviceMesh") + def service_mesh(self) -> Optional[pulumi.Input['ServiceTemplateServiceMeshArgs']]: + """ + Enables Cloud Service Mesh for this Revision. + Structure is documented below. + """ + return pulumi.get(self, "service_mesh") + + @service_mesh.setter + def service_mesh(self, value: Optional[pulumi.Input['ServiceTemplateServiceMeshArgs']]): + pulumi.set(self, "service_mesh", value) + @property @pulumi.getter(name="sessionAffinity") def session_affinity(self) -> Optional[pulumi.Input[bool]]: @@ -4418,6 +4443,44 @@ def min_instance_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "min_instance_count", value) +if not MYPY: + class ServiceTemplateServiceMeshArgsDict(TypedDict): + mesh: NotRequired[pulumi.Input[str]] + """ + The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. 
+ + - - - + """ +elif False: + ServiceTemplateServiceMeshArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class ServiceTemplateServiceMeshArgs: + def __init__(__self__, *, + mesh: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] mesh: The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + + - - - + """ + if mesh is not None: + pulumi.set(__self__, "mesh", mesh) + + @property + @pulumi.getter + def mesh(self) -> Optional[pulumi.Input[str]]: + """ + The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + + - - - + """ + return pulumi.get(self, "mesh") + + @mesh.setter + def mesh(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "mesh", value) + + if not MYPY: class ServiceTemplateVolumeArgsDict(TypedDict): name: pulumi.Input[str] @@ -4436,7 +4499,7 @@ class ServiceTemplateVolumeArgsDict(TypedDict): """ gcs: NotRequired[pulumi.Input['ServiceTemplateVolumeGcsArgsDict']] """ - Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Structure is documented below. """ nfs: NotRequired[pulumi.Input['ServiceTemplateVolumeNfsArgsDict']] @@ -4467,7 +4530,7 @@ def __init__(__self__, *, Structure is documented below. :param pulumi.Input['ServiceTemplateVolumeEmptyDirArgs'] empty_dir: Ephemeral storage used as a shared volume. Structure is documented below. - :param pulumi.Input['ServiceTemplateVolumeGcsArgs'] gcs: Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + :param pulumi.Input['ServiceTemplateVolumeGcsArgs'] gcs: Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Structure is documented below. :param pulumi.Input['ServiceTemplateVolumeNfsArgs'] nfs: Represents an NFS mount. Structure is documented below. @@ -4528,7 +4591,7 @@ def empty_dir(self, value: Optional[pulumi.Input['ServiceTemplateVolumeEmptyDirA @pulumi.getter def gcs(self) -> Optional[pulumi.Input['ServiceTemplateVolumeGcsArgs']]: """ - Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Structure is documented below. """ return pulumi.get(self, "gcs") @@ -4718,8 +4781,6 @@ class ServiceTemplateVolumeNfsArgsDict(TypedDict): read_only: NotRequired[pulumi.Input[bool]] """ If true, mount the NFS volume as read only - - - - - """ elif False: ServiceTemplateVolumeNfsArgsDict: TypeAlias = Mapping[str, Any] @@ -4734,8 +4795,6 @@ def __init__(__self__, *, :param pulumi.Input[str] path: Path that is exported by the NFS server. 
:param pulumi.Input[str] server: Hostname or IP address of the NFS server :param pulumi.Input[bool] read_only: If true, mount the NFS volume as read only - - - - - """ pulumi.set(__self__, "path", path) pulumi.set(__self__, "server", server) @@ -4771,8 +4830,6 @@ def server(self, value: pulumi.Input[str]): def read_only(self) -> Optional[pulumi.Input[bool]]: """ If true, mount the NFS volume as read only - - - - - """ return pulumi.get(self, "read_only") diff --git a/sdk/python/pulumi_gcp/cloudrunv2/outputs.py b/sdk/python/pulumi_gcp/cloudrunv2/outputs.py index 74d01f6cbb..0d00f90d91 100644 --- a/sdk/python/pulumi_gcp/cloudrunv2/outputs.py +++ b/sdk/python/pulumi_gcp/cloudrunv2/outputs.py @@ -64,6 +64,7 @@ 'ServiceTemplateContainerStartupProbeTcpSocket', 'ServiceTemplateContainerVolumeMount', 'ServiceTemplateScaling', + 'ServiceTemplateServiceMesh', 'ServiceTemplateVolume', 'ServiceTemplateVolumeCloudSqlInstance', 'ServiceTemplateVolumeEmptyDir', @@ -120,6 +121,7 @@ 'GetServiceTemplateContainerStartupProbeTcpSocketResult', 'GetServiceTemplateContainerVolumeMountResult', 'GetServiceTemplateScalingResult', + 'GetServiceTemplateServiceMeshResult', 'GetServiceTemplateVolumeResult', 'GetServiceTemplateVolumeCloudSqlInstanceResult', 'GetServiceTemplateVolumeEmptyDirResult', @@ -1117,9 +1119,9 @@ def __init__(__self__, *, Structure is documented below. :param 'JobTemplateTemplateVolumeEmptyDirArgs' empty_dir: Ephemeral storage used as a shared volume. Structure is documented below. - :param 'JobTemplateTemplateVolumeGcsArgs' gcs: Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + :param 'JobTemplateTemplateVolumeGcsArgs' gcs: Cloud Storage bucket mounted as a volume using GCSFuse. Structure is documented below. - :param 'JobTemplateTemplateVolumeNfsArgs' nfs: NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + :param 'JobTemplateTemplateVolumeNfsArgs' nfs: NFS share mounted as a volume. Structure is documented below. :param 'JobTemplateTemplateVolumeSecretArgs' secret: Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret Structure is documented below. @@ -1166,7 +1168,7 @@ def empty_dir(self) -> Optional['outputs.JobTemplateTemplateVolumeEmptyDir']: @pulumi.getter def gcs(self) -> Optional['outputs.JobTemplateTemplateVolumeGcs']: """ - Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. Structure is documented below. """ return pulumi.get(self, "gcs") @@ -1175,7 +1177,7 @@ def gcs(self) -> Optional['outputs.JobTemplateTemplateVolumeGcs']: @pulumi.getter def nfs(self) -> Optional['outputs.JobTemplateTemplateVolumeNfs']: """ - NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + NFS share mounted as a volume. Structure is documented below. 
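With the launch-stage requirement dropped from the GCS and NFS volume docs above, a Cloud Run v2 Job can mount a Cloud Storage bucket without setting `launch_stage`. A minimal sketch of the dict-style inputs; the bucket name, image, and mount path are placeholders rather than values from this patch:

```python
import pulumi_gcp as gcp

# Minimal sketch: mount a Cloud Storage bucket into a Cloud Run v2 Job via GCSFuse.
# "my-bucket", the container image, and the mount path are placeholders.
job = gcp.cloudrunv2.Job("gcs-volume-job",
    name="cloudrun-job",
    location="us-central1",
    deletion_protection=False,
    template={
        "template": {
            "containers": [{
                "image": "us-docker.pkg.dev/cloudrun/container/job",
                "volume_mounts": [{
                    "name": "bucket",
                    "mount_path": "/mnt/bucket",
                }],
            }],
            "volumes": [{
                "name": "bucket",
                "gcs": {
                    "bucket": "my-bucket",
                    "read_only": False,
                },
            }],
        },
    })
```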
""" return pulumi.get(self, "nfs") @@ -2037,6 +2039,8 @@ def __key_warning(key: str): suggest = "max_instance_request_concurrency" elif key == "serviceAccount": suggest = "service_account" + elif key == "serviceMesh": + suggest = "service_mesh" elif key == "sessionAffinity": suggest = "session_affinity" elif key == "vpcAccess": @@ -2063,6 +2067,7 @@ def __init__(__self__, *, revision: Optional[str] = None, scaling: Optional['outputs.ServiceTemplateScaling'] = None, service_account: Optional[str] = None, + service_mesh: Optional['outputs.ServiceTemplateServiceMesh'] = None, session_affinity: Optional[bool] = None, timeout: Optional[str] = None, volumes: Optional[Sequence['outputs.ServiceTemplateVolume']] = None, @@ -2087,6 +2092,8 @@ def __init__(__self__, *, :param 'ServiceTemplateScalingArgs' scaling: Scaling settings for this Revision. Structure is documented below. :param str service_account: Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. + :param 'ServiceTemplateServiceMeshArgs' service_mesh: Enables Cloud Service Mesh for this Revision. + Structure is documented below. :param bool session_affinity: Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity :param str timeout: Max allowed time for an instance to respond to a request. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". @@ -2113,6 +2120,8 @@ def __init__(__self__, *, pulumi.set(__self__, "scaling", scaling) if service_account is not None: pulumi.set(__self__, "service_account", service_account) + if service_mesh is not None: + pulumi.set(__self__, "service_mesh", service_mesh) if session_affinity is not None: pulumi.set(__self__, "session_affinity", session_affinity) if timeout is not None: @@ -2204,6 +2213,15 @@ def service_account(self) -> Optional[str]: """ return pulumi.get(self, "service_account") + @property + @pulumi.getter(name="serviceMesh") + def service_mesh(self) -> Optional['outputs.ServiceTemplateServiceMesh']: + """ + Enables Cloud Service Mesh for this Revision. + Structure is documented below. + """ + return pulumi.get(self, "service_mesh") + @property @pulumi.getter(name="sessionAffinity") def session_affinity(self) -> Optional[bool]: @@ -3317,6 +3335,29 @@ def min_instance_count(self) -> Optional[int]: return pulumi.get(self, "min_instance_count") +@pulumi.output_type +class ServiceTemplateServiceMesh(dict): + def __init__(__self__, *, + mesh: Optional[str] = None): + """ + :param str mesh: The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + + - - - + """ + if mesh is not None: + pulumi.set(__self__, "mesh", mesh) + + @property + @pulumi.getter + def mesh(self) -> Optional[str]: + """ + The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + + - - - + """ + return pulumi.get(self, "mesh") + + @pulumi.output_type class ServiceTemplateVolume(dict): @staticmethod @@ -3351,7 +3392,7 @@ def __init__(__self__, *, Structure is documented below. :param 'ServiceTemplateVolumeEmptyDirArgs' empty_dir: Ephemeral storage used as a shared volume. 
Structure is documented below. - :param 'ServiceTemplateVolumeGcsArgs' gcs: Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + :param 'ServiceTemplateVolumeGcsArgs' gcs: Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Structure is documented below. :param 'ServiceTemplateVolumeNfsArgs' nfs: Represents an NFS mount. Structure is documented below. @@ -3400,7 +3441,7 @@ def empty_dir(self) -> Optional['outputs.ServiceTemplateVolumeEmptyDir']: @pulumi.getter def gcs(self) -> Optional['outputs.ServiceTemplateVolumeGcs']: """ - Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. Structure is documented below. """ return pulumi.get(self, "gcs") @@ -3569,8 +3610,6 @@ def __init__(__self__, *, :param str path: Path that is exported by the NFS server. :param str server: Hostname or IP address of the NFS server :param bool read_only: If true, mount the NFS volume as read only - - - - - """ pulumi.set(__self__, "path", path) pulumi.set(__self__, "server", server) @@ -3598,8 +3637,6 @@ def server(self) -> str: def read_only(self) -> Optional[bool]: """ If true, mount the NFS volume as read only - - - - - """ return pulumi.get(self, "read_only") @@ -4751,9 +4788,9 @@ def __init__(__self__, *, """ :param Sequence['GetJobTemplateTemplateVolumeCloudSqlInstanceArgs'] cloud_sql_instances: For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. :param Sequence['GetJobTemplateTemplateVolumeEmptyDirArgs'] empty_dirs: Ephemeral storage used as a shared volume. - :param Sequence['GetJobTemplateTemplateVolumeGcArgs'] gcs: Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + :param Sequence['GetJobTemplateTemplateVolumeGcArgs'] gcs: Cloud Storage bucket mounted as a volume using GCSFuse. :param str name: The name of the Cloud Run v2 Job. - :param Sequence['GetJobTemplateTemplateVolumeNfArgs'] nfs: NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + :param Sequence['GetJobTemplateTemplateVolumeNfArgs'] nfs: NFS share mounted as a volume. :param Sequence['GetJobTemplateTemplateVolumeSecretArgs'] secrets: Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret """ pulumi.set(__self__, "cloud_sql_instances", cloud_sql_instances) @@ -4783,7 +4820,7 @@ def empty_dirs(self) -> Sequence['outputs.GetJobTemplateTemplateVolumeEmptyDirRe @pulumi.getter def gcs(self) -> Sequence['outputs.GetJobTemplateTemplateVolumeGcResult']: """ - Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. """ return pulumi.get(self, "gcs") @@ -4799,7 +4836,7 @@ def name(self) -> str: @pulumi.getter def nfs(self) -> Sequence['outputs.GetJobTemplateTemplateVolumeNfResult']: """ - NFS share mounted as a volume. 
This feature requires the launch stage to be set to ALPHA or BETA. + NFS share mounted as a volume. """ return pulumi.get(self, "nfs") @@ -5364,6 +5401,7 @@ def __init__(__self__, *, revision: str, scalings: Sequence['outputs.GetServiceTemplateScalingResult'], service_account: str, + service_meshes: Sequence['outputs.GetServiceTemplateServiceMeshResult'], session_affinity: bool, timeout: str, volumes: Sequence['outputs.GetServiceTemplateVolumeResult'], @@ -5388,6 +5426,7 @@ def __init__(__self__, *, :param str revision: The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name. :param Sequence['GetServiceTemplateScalingArgs'] scalings: Scaling settings for this Revision. :param str service_account: Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. + :param Sequence['GetServiceTemplateServiceMeshArgs'] service_meshes: Enables Cloud Service Mesh for this Revision. :param bool session_affinity: Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity :param str timeout: Max allowed time for an instance to respond to a request. @@ -5404,6 +5443,7 @@ def __init__(__self__, *, pulumi.set(__self__, "revision", revision) pulumi.set(__self__, "scalings", scalings) pulumi.set(__self__, "service_account", service_account) + pulumi.set(__self__, "service_meshes", service_meshes) pulumi.set(__self__, "session_affinity", session_affinity) pulumi.set(__self__, "timeout", timeout) pulumi.set(__self__, "volumes", volumes) @@ -5491,6 +5531,14 @@ def service_account(self) -> str: """ return pulumi.get(self, "service_account") + @property + @pulumi.getter(name="serviceMeshes") + def service_meshes(self) -> Sequence['outputs.GetServiceTemplateServiceMeshResult']: + """ + Enables Cloud Service Mesh for this Revision. + """ + return pulumi.get(self, "service_meshes") + @property @pulumi.getter(name="sessionAffinity") def session_affinity(self) -> bool: @@ -6307,6 +6355,24 @@ def min_instance_count(self) -> int: return pulumi.get(self, "min_instance_count") +@pulumi.output_type +class GetServiceTemplateServiceMeshResult(dict): + def __init__(__self__, *, + mesh: str): + """ + :param str mesh: The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + """ + pulumi.set(__self__, "mesh", mesh) + + @property + @pulumi.getter + def mesh(self) -> str: + """ + The Mesh resource name. For more information see https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.meshes#resource:-mesh. + """ + return pulumi.get(self, "mesh") + + @pulumi.output_type class GetServiceTemplateVolumeResult(dict): def __init__(__self__, *, @@ -6319,7 +6385,7 @@ def __init__(__self__, *, """ :param Sequence['GetServiceTemplateVolumeCloudSqlInstanceArgs'] cloud_sql_instances: For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. :param Sequence['GetServiceTemplateVolumeEmptyDirArgs'] empty_dirs: Ephemeral storage used as a shared volume. 
- :param Sequence['GetServiceTemplateVolumeGcArgs'] gcs: Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + :param Sequence['GetServiceTemplateVolumeGcArgs'] gcs: Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. :param str name: The name of the Cloud Run v2 Service. :param Sequence['GetServiceTemplateVolumeNfArgs'] nfs: Represents an NFS mount. :param Sequence['GetServiceTemplateVolumeSecretArgs'] secrets: Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret @@ -6351,7 +6417,7 @@ def empty_dirs(self) -> Sequence['outputs.GetServiceTemplateVolumeEmptyDirResult @pulumi.getter def gcs(self) -> Sequence['outputs.GetServiceTemplateVolumeGcResult']: """ - Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment. """ return pulumi.get(self, "gcs") diff --git a/sdk/python/pulumi_gcp/cloudrunv2/service.py b/sdk/python/pulumi_gcp/cloudrunv2/service.py index 4ac77a73bd..3694d60c4d 100644 --- a/sdk/python/pulumi_gcp/cloudrunv2/service.py +++ b/sdk/python/pulumi_gcp/cloudrunv2/service.py @@ -1338,7 +1338,6 @@ def __init__(__self__, name="cloudrun-service", location="us-central1", deletion_protection=False, - launch_stage="BETA", template={ "execution_environment": "EXECUTION_ENVIRONMENT_GEN2", "containers": [{ @@ -1380,7 +1379,6 @@ def __init__(__self__, location="us-central1", deletion_protection=False, ingress="INGRESS_TRAFFIC_ALL", - launch_stage="BETA", template={ "execution_environment": "EXECUTION_ENVIRONMENT_GEN2", "containers": [{ @@ -1406,6 +1404,31 @@ def __init__(__self__, }], }) ``` + ### Cloudrunv2 Service Mesh + + ```python + import pulumi + import pulumi_gcp as gcp + import pulumi_time as time + + mesh = gcp.networkservices.Mesh("mesh", name="network-services-mesh") + wait_for_mesh = time.index.Sleep("wait_for_mesh", create_duration="1m", + opts = pulumi.ResourceOptions(depends_on=[mesh])) + default = gcp.cloudrunv2.Service("default", + name="cloudrun-service", + deletion_protection=False, + location="us-central1", + launch_stage="BETA", + template={ + "containers": [{ + "image": "us-docker.pkg.dev/cloudrun/container/hello", + }], + "service_mesh": { + "mesh": mesh.id, + }, + }, + opts = pulumi.ResourceOptions(depends_on=[wait_for_mesh])) + ``` ## Import @@ -1806,7 +1829,6 @@ def __init__(__self__, name="cloudrun-service", location="us-central1", deletion_protection=False, - launch_stage="BETA", template={ "execution_environment": "EXECUTION_ENVIRONMENT_GEN2", "containers": [{ @@ -1848,7 +1870,6 @@ def __init__(__self__, location="us-central1", deletion_protection=False, ingress="INGRESS_TRAFFIC_ALL", - launch_stage="BETA", template={ "execution_environment": "EXECUTION_ENVIRONMENT_GEN2", "containers": [{ @@ -1874,6 +1895,31 @@ def __init__(__self__, }], }) ``` + ### Cloudrunv2 Service Mesh + + ```python + import pulumi + import pulumi_gcp as gcp + import pulumi_time as time + + mesh = gcp.networkservices.Mesh("mesh", name="network-services-mesh") + wait_for_mesh = time.index.Sleep("wait_for_mesh", create_duration="1m", + opts = pulumi.ResourceOptions(depends_on=[mesh])) + default = 
gcp.cloudrunv2.Service("default", + name="cloudrun-service", + deletion_protection=False, + location="us-central1", + launch_stage="BETA", + template={ + "containers": [{ + "image": "us-docker.pkg.dev/cloudrun/container/hello", + }], + "service_mesh": { + "mesh": mesh.id, + }, + }, + opts = pulumi.ResourceOptions(depends_on=[wait_for_mesh])) + ``` ## Import diff --git a/sdk/python/pulumi_gcp/cloudtasks/_inputs.py b/sdk/python/pulumi_gcp/cloudtasks/_inputs.py index 06ef3af605..e253873778 100644 --- a/sdk/python/pulumi_gcp/cloudtasks/_inputs.py +++ b/sdk/python/pulumi_gcp/cloudtasks/_inputs.py @@ -17,6 +17,22 @@ __all__ = [ 'QueueAppEngineRoutingOverrideArgs', 'QueueAppEngineRoutingOverrideArgsDict', + 'QueueHttpTargetArgs', + 'QueueHttpTargetArgsDict', + 'QueueHttpTargetHeaderOverrideArgs', + 'QueueHttpTargetHeaderOverrideArgsDict', + 'QueueHttpTargetHeaderOverrideHeaderArgs', + 'QueueHttpTargetHeaderOverrideHeaderArgsDict', + 'QueueHttpTargetOauthTokenArgs', + 'QueueHttpTargetOauthTokenArgsDict', + 'QueueHttpTargetOidcTokenArgs', + 'QueueHttpTargetOidcTokenArgsDict', + 'QueueHttpTargetUriOverrideArgs', + 'QueueHttpTargetUriOverrideArgsDict', + 'QueueHttpTargetUriOverridePathOverrideArgs', + 'QueueHttpTargetUriOverridePathOverrideArgsDict', + 'QueueHttpTargetUriOverrideQueryOverrideArgs', + 'QueueHttpTargetUriOverrideQueryOverrideArgsDict', 'QueueIamBindingConditionArgs', 'QueueIamBindingConditionArgsDict', 'QueueIamMemberConditionArgs', @@ -135,6 +151,620 @@ def version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "version", value) +if not MYPY: + class QueueHttpTargetArgsDict(TypedDict): + header_overrides: NotRequired[pulumi.Input[Sequence[pulumi.Input['QueueHttpTargetHeaderOverrideArgsDict']]]] + """ + HTTP target headers. + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + Structure is documented below. + """ + http_method: NotRequired[pulumi.Input[str]] + """ + The HTTP method to use for the request. + When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + """ + oauth_token: NotRequired[pulumi.Input['QueueHttpTargetOauthTokenArgsDict']] + """ + If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + Structure is documented below. + """ + oidc_token: NotRequired[pulumi.Input['QueueHttpTargetOidcTokenArgsDict']] + """ + If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. 
+ Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + Structure is documented below. + """ + uri_override: NotRequired[pulumi.Input['QueueHttpTargetUriOverrideArgsDict']] + """ + URI override. + When specified, overrides the execution URI for all the tasks in the queue. + Structure is documented below. + """ +elif False: + QueueHttpTargetArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class QueueHttpTargetArgs: + def __init__(__self__, *, + header_overrides: Optional[pulumi.Input[Sequence[pulumi.Input['QueueHttpTargetHeaderOverrideArgs']]]] = None, + http_method: Optional[pulumi.Input[str]] = None, + oauth_token: Optional[pulumi.Input['QueueHttpTargetOauthTokenArgs']] = None, + oidc_token: Optional[pulumi.Input['QueueHttpTargetOidcTokenArgs']] = None, + uri_override: Optional[pulumi.Input['QueueHttpTargetUriOverrideArgs']] = None): + """ + :param pulumi.Input[Sequence[pulumi.Input['QueueHttpTargetHeaderOverrideArgs']]] header_overrides: HTTP target headers. + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + Structure is documented below. + :param pulumi.Input[str] http_method: The HTTP method to use for the request. + When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + :param pulumi.Input['QueueHttpTargetOauthTokenArgs'] oauth_token: If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + Structure is documented below. + :param pulumi.Input['QueueHttpTargetOidcTokenArgs'] oidc_token: If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + Structure is documented below. + :param pulumi.Input['QueueHttpTargetUriOverrideArgs'] uri_override: URI override. + When specified, overrides the execution URI for all the tasks in the queue. + Structure is documented below. 
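Taken together, the fields above translate into a queue-level HTTP target roughly like the following sketch; the queue name, location, host, and header values are placeholders:

```python
import pulumi_gcp as gcp

# Minimal sketch: send every task in the queue to an overridden HTTPS host with POST
# and attach a custom header. All literal values are placeholders.
queue = gcp.cloudtasks.Queue("http-target-queue",
    name="http-target-queue",
    location="us-central1",
    http_target={
        "http_method": "POST",
        "uri_override": {
            "scheme": "HTTPS",
            "host": "example.net",
            "uri_override_enforce_mode": "IF_NOT_EXISTS",
        },
        "header_overrides": [{
            "header": {
                "key": "X-Custom-Header",
                "value": "example",
            },
        }],
    })
```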
+ """ + if header_overrides is not None: + pulumi.set(__self__, "header_overrides", header_overrides) + if http_method is not None: + pulumi.set(__self__, "http_method", http_method) + if oauth_token is not None: + pulumi.set(__self__, "oauth_token", oauth_token) + if oidc_token is not None: + pulumi.set(__self__, "oidc_token", oidc_token) + if uri_override is not None: + pulumi.set(__self__, "uri_override", uri_override) + + @property + @pulumi.getter(name="headerOverrides") + def header_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['QueueHttpTargetHeaderOverrideArgs']]]]: + """ + HTTP target headers. + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + Structure is documented below. + """ + return pulumi.get(self, "header_overrides") + + @header_overrides.setter + def header_overrides(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['QueueHttpTargetHeaderOverrideArgs']]]]): + pulumi.set(self, "header_overrides", value) + + @property + @pulumi.getter(name="httpMethod") + def http_method(self) -> Optional[pulumi.Input[str]]: + """ + The HTTP method to use for the request. + When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + """ + return pulumi.get(self, "http_method") + + @http_method.setter + def http_method(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "http_method", value) + + @property + @pulumi.getter(name="oauthToken") + def oauth_token(self) -> Optional[pulumi.Input['QueueHttpTargetOauthTokenArgs']]: + """ + If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + Structure is documented below. + """ + return pulumi.get(self, "oauth_token") + + @oauth_token.setter + def oauth_token(self, value: Optional[pulumi.Input['QueueHttpTargetOauthTokenArgs']]): + pulumi.set(self, "oauth_token", value) + + @property + @pulumi.getter(name="oidcToken") + def oidc_token(self) -> Optional[pulumi.Input['QueueHttpTargetOidcTokenArgs']]: + """ + If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + Structure is documented below. 
+ """ + return pulumi.get(self, "oidc_token") + + @oidc_token.setter + def oidc_token(self, value: Optional[pulumi.Input['QueueHttpTargetOidcTokenArgs']]): + pulumi.set(self, "oidc_token", value) + + @property + @pulumi.getter(name="uriOverride") + def uri_override(self) -> Optional[pulumi.Input['QueueHttpTargetUriOverrideArgs']]: + """ + URI override. + When specified, overrides the execution URI for all the tasks in the queue. + Structure is documented below. + """ + return pulumi.get(self, "uri_override") + + @uri_override.setter + def uri_override(self, value: Optional[pulumi.Input['QueueHttpTargetUriOverrideArgs']]): + pulumi.set(self, "uri_override", value) + + +if not MYPY: + class QueueHttpTargetHeaderOverrideArgsDict(TypedDict): + header: pulumi.Input['QueueHttpTargetHeaderOverrideHeaderArgsDict'] + """ + Header embodying a key and a value. + Structure is documented below. + """ +elif False: + QueueHttpTargetHeaderOverrideArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class QueueHttpTargetHeaderOverrideArgs: + def __init__(__self__, *, + header: pulumi.Input['QueueHttpTargetHeaderOverrideHeaderArgs']): + """ + :param pulumi.Input['QueueHttpTargetHeaderOverrideHeaderArgs'] header: Header embodying a key and a value. + Structure is documented below. + """ + pulumi.set(__self__, "header", header) + + @property + @pulumi.getter + def header(self) -> pulumi.Input['QueueHttpTargetHeaderOverrideHeaderArgs']: + """ + Header embodying a key and a value. + Structure is documented below. + """ + return pulumi.get(self, "header") + + @header.setter + def header(self, value: pulumi.Input['QueueHttpTargetHeaderOverrideHeaderArgs']): + pulumi.set(self, "header", value) + + +if not MYPY: + class QueueHttpTargetHeaderOverrideHeaderArgsDict(TypedDict): + key: pulumi.Input[str] + """ + The Key of the header. + """ + value: pulumi.Input[str] + """ + The Value of the header. + """ +elif False: + QueueHttpTargetHeaderOverrideHeaderArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class QueueHttpTargetHeaderOverrideHeaderArgs: + def __init__(__self__, *, + key: pulumi.Input[str], + value: pulumi.Input[str]): + """ + :param pulumi.Input[str] key: The Key of the header. + :param pulumi.Input[str] value: The Value of the header. + """ + pulumi.set(__self__, "key", key) + pulumi.set(__self__, "value", value) + + @property + @pulumi.getter + def key(self) -> pulumi.Input[str]: + """ + The Key of the header. + """ + return pulumi.get(self, "key") + + @key.setter + def key(self, value: pulumi.Input[str]): + pulumi.set(self, "key", value) + + @property + @pulumi.getter + def value(self) -> pulumi.Input[str]: + """ + The Value of the header. + """ + return pulumi.get(self, "value") + + @value.setter + def value(self, value: pulumi.Input[str]): + pulumi.set(self, "value", value) + + +if not MYPY: + class QueueHttpTargetOauthTokenArgsDict(TypedDict): + service_account_email: pulumi.Input[str] + """ + Service account email to be used for generating OAuth token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + """ + scope: NotRequired[pulumi.Input[str]] + """ + OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. 
+ """ +elif False: + QueueHttpTargetOauthTokenArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class QueueHttpTargetOauthTokenArgs: + def __init__(__self__, *, + service_account_email: pulumi.Input[str], + scope: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] service_account_email: Service account email to be used for generating OAuth token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + :param pulumi.Input[str] scope: OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + """ + pulumi.set(__self__, "service_account_email", service_account_email) + if scope is not None: + pulumi.set(__self__, "scope", scope) + + @property + @pulumi.getter(name="serviceAccountEmail") + def service_account_email(self) -> pulumi.Input[str]: + """ + Service account email to be used for generating OAuth token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + """ + return pulumi.get(self, "service_account_email") + + @service_account_email.setter + def service_account_email(self, value: pulumi.Input[str]): + pulumi.set(self, "service_account_email", value) + + @property + @pulumi.getter + def scope(self) -> Optional[pulumi.Input[str]]: + """ + OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + """ + return pulumi.get(self, "scope") + + @scope.setter + def scope(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "scope", value) + + +if not MYPY: + class QueueHttpTargetOidcTokenArgsDict(TypedDict): + service_account_email: pulumi.Input[str] + """ + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + """ + audience: NotRequired[pulumi.Input[str]] + """ + Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + """ +elif False: + QueueHttpTargetOidcTokenArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class QueueHttpTargetOidcTokenArgs: + def __init__(__self__, *, + service_account_email: pulumi.Input[str], + audience: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] service_account_email: Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + :param pulumi.Input[str] audience: Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + """ + pulumi.set(__self__, "service_account_email", service_account_email) + if audience is not None: + pulumi.set(__self__, "audience", audience) + + @property + @pulumi.getter(name="serviceAccountEmail") + def service_account_email(self) -> pulumi.Input[str]: + """ + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. 
+ """ + return pulumi.get(self, "service_account_email") + + @service_account_email.setter + def service_account_email(self, value: pulumi.Input[str]): + pulumi.set(self, "service_account_email", value) + + @property + @pulumi.getter + def audience(self) -> Optional[pulumi.Input[str]]: + """ + Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + """ + return pulumi.get(self, "audience") + + @audience.setter + def audience(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "audience", value) + + +if not MYPY: + class QueueHttpTargetUriOverrideArgsDict(TypedDict): + host: NotRequired[pulumi.Input[str]] + """ + Host override. + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + """ + path_override: NotRequired[pulumi.Input['QueueHttpTargetUriOverridePathOverrideArgsDict']] + """ + URI path. + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + Structure is documented below. + """ + port: NotRequired[pulumi.Input[str]] + """ + Port override. + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + """ + query_override: NotRequired[pulumi.Input['QueueHttpTargetUriOverrideQueryOverrideArgsDict']] + """ + URI query. + When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + Structure is documented below. + """ + scheme: NotRequired[pulumi.Input[str]] + """ + Scheme override. + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + Possible values are: `HTTP`, `HTTPS`. + """ + uri_override_enforce_mode: NotRequired[pulumi.Input[str]] + """ + URI Override Enforce Mode + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + """ +elif False: + QueueHttpTargetUriOverrideArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class QueueHttpTargetUriOverrideArgs: + def __init__(__self__, *, + host: Optional[pulumi.Input[str]] = None, + path_override: Optional[pulumi.Input['QueueHttpTargetUriOverridePathOverrideArgs']] = None, + port: Optional[pulumi.Input[str]] = None, + query_override: Optional[pulumi.Input['QueueHttpTargetUriOverrideQueryOverrideArgs']] = None, + scheme: Optional[pulumi.Input[str]] = None, + uri_override_enforce_mode: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] host: Host override. + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + :param pulumi.Input['QueueHttpTargetUriOverridePathOverrideArgs'] path_override: URI path. + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + Structure is documented below. 
+ :param pulumi.Input[str] port: Port override. + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + :param pulumi.Input['QueueHttpTargetUriOverrideQueryOverrideArgs'] query_override: URI query. + When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + Structure is documented below. + :param pulumi.Input[str] scheme: Scheme override. + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + Possible values are: `HTTP`, `HTTPS`. + :param pulumi.Input[str] uri_override_enforce_mode: URI Override Enforce Mode + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + """ + if host is not None: + pulumi.set(__self__, "host", host) + if path_override is not None: + pulumi.set(__self__, "path_override", path_override) + if port is not None: + pulumi.set(__self__, "port", port) + if query_override is not None: + pulumi.set(__self__, "query_override", query_override) + if scheme is not None: + pulumi.set(__self__, "scheme", scheme) + if uri_override_enforce_mode is not None: + pulumi.set(__self__, "uri_override_enforce_mode", uri_override_enforce_mode) + + @property + @pulumi.getter + def host(self) -> Optional[pulumi.Input[str]]: + """ + Host override. + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + """ + return pulumi.get(self, "host") + + @host.setter + def host(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "host", value) + + @property + @pulumi.getter(name="pathOverride") + def path_override(self) -> Optional[pulumi.Input['QueueHttpTargetUriOverridePathOverrideArgs']]: + """ + URI path. + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + Structure is documented below. + """ + return pulumi.get(self, "path_override") + + @path_override.setter + def path_override(self, value: Optional[pulumi.Input['QueueHttpTargetUriOverridePathOverrideArgs']]): + pulumi.set(self, "path_override", value) + + @property + @pulumi.getter + def port(self) -> Optional[pulumi.Input[str]]: + """ + Port override. + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + """ + return pulumi.get(self, "port") + + @port.setter + def port(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "port", value) + + @property + @pulumi.getter(name="queryOverride") + def query_override(self) -> Optional[pulumi.Input['QueueHttpTargetUriOverrideQueryOverrideArgs']]: + """ + URI query. + When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + Structure is documented below. 
+ """ + return pulumi.get(self, "query_override") + + @query_override.setter + def query_override(self, value: Optional[pulumi.Input['QueueHttpTargetUriOverrideQueryOverrideArgs']]): + pulumi.set(self, "query_override", value) + + @property + @pulumi.getter + def scheme(self) -> Optional[pulumi.Input[str]]: + """ + Scheme override. + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + Possible values are: `HTTP`, `HTTPS`. + """ + return pulumi.get(self, "scheme") + + @scheme.setter + def scheme(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "scheme", value) + + @property + @pulumi.getter(name="uriOverrideEnforceMode") + def uri_override_enforce_mode(self) -> Optional[pulumi.Input[str]]: + """ + URI Override Enforce Mode + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + """ + return pulumi.get(self, "uri_override_enforce_mode") + + @uri_override_enforce_mode.setter + def uri_override_enforce_mode(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "uri_override_enforce_mode", value) + + +if not MYPY: + class QueueHttpTargetUriOverridePathOverrideArgsDict(TypedDict): + path: NotRequired[pulumi.Input[str]] + """ + The URI path (e.g., /users/1234). Default is an empty string. + """ +elif False: + QueueHttpTargetUriOverridePathOverrideArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class QueueHttpTargetUriOverridePathOverrideArgs: + def __init__(__self__, *, + path: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] path: The URI path (e.g., /users/1234). Default is an empty string. + """ + if path is not None: + pulumi.set(__self__, "path", path) + + @property + @pulumi.getter + def path(self) -> Optional[pulumi.Input[str]]: + """ + The URI path (e.g., /users/1234). Default is an empty string. + """ + return pulumi.get(self, "path") + + @path.setter + def path(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "path", value) + + +if not MYPY: + class QueueHttpTargetUriOverrideQueryOverrideArgsDict(TypedDict): + query_params: NotRequired[pulumi.Input[str]] + """ + The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + """ +elif False: + QueueHttpTargetUriOverrideQueryOverrideArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class QueueHttpTargetUriOverrideQueryOverrideArgs: + def __init__(__self__, *, + query_params: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] query_params: The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + """ + if query_params is not None: + pulumi.set(__self__, "query_params", query_params) + + @property + @pulumi.getter(name="queryParams") + def query_params(self) -> Optional[pulumi.Input[str]]: + """ + The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. 
+ """ + return pulumi.get(self, "query_params") + + @query_params.setter + def query_params(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "query_params", value) + + if not MYPY: class QueueIamBindingConditionArgsDict(TypedDict): expression: pulumi.Input[str] diff --git a/sdk/python/pulumi_gcp/cloudtasks/outputs.py b/sdk/python/pulumi_gcp/cloudtasks/outputs.py index 6d73328d3c..b31d93c73d 100644 --- a/sdk/python/pulumi_gcp/cloudtasks/outputs.py +++ b/sdk/python/pulumi_gcp/cloudtasks/outputs.py @@ -13,9 +13,18 @@ else: from typing_extensions import NotRequired, TypedDict, TypeAlias from .. import _utilities +from . import outputs __all__ = [ 'QueueAppEngineRoutingOverride', + 'QueueHttpTarget', + 'QueueHttpTargetHeaderOverride', + 'QueueHttpTargetHeaderOverrideHeader', + 'QueueHttpTargetOauthToken', + 'QueueHttpTargetOidcToken', + 'QueueHttpTargetUriOverride', + 'QueueHttpTargetUriOverridePathOverride', + 'QueueHttpTargetUriOverrideQueryOverride', 'QueueIamBindingCondition', 'QueueIamMemberCondition', 'QueueRateLimits', @@ -86,6 +95,476 @@ def version(self) -> Optional[str]: return pulumi.get(self, "version") +@pulumi.output_type +class QueueHttpTarget(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "headerOverrides": + suggest = "header_overrides" + elif key == "httpMethod": + suggest = "http_method" + elif key == "oauthToken": + suggest = "oauth_token" + elif key == "oidcToken": + suggest = "oidc_token" + elif key == "uriOverride": + suggest = "uri_override" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in QueueHttpTarget. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + QueueHttpTarget.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + QueueHttpTarget.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + header_overrides: Optional[Sequence['outputs.QueueHttpTargetHeaderOverride']] = None, + http_method: Optional[str] = None, + oauth_token: Optional['outputs.QueueHttpTargetOauthToken'] = None, + oidc_token: Optional['outputs.QueueHttpTargetOidcToken'] = None, + uri_override: Optional['outputs.QueueHttpTargetUriOverride'] = None): + """ + :param Sequence['QueueHttpTargetHeaderOverrideArgs'] header_overrides: HTTP target headers. + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + Structure is documented below. + :param str http_method: The HTTP method to use for the request. + When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + :param 'QueueHttpTargetOauthTokenArgs' oauth_token: If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. 
+ Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + Structure is documented below. + :param 'QueueHttpTargetOidcTokenArgs' oidc_token: If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. + Structure is documented below. + :param 'QueueHttpTargetUriOverrideArgs' uri_override: URI override. + When specified, overrides the execution URI for all the tasks in the queue. + Structure is documented below. + """ + if header_overrides is not None: + pulumi.set(__self__, "header_overrides", header_overrides) + if http_method is not None: + pulumi.set(__self__, "http_method", http_method) + if oauth_token is not None: + pulumi.set(__self__, "oauth_token", oauth_token) + if oidc_token is not None: + pulumi.set(__self__, "oidc_token", oidc_token) + if uri_override is not None: + pulumi.set(__self__, "uri_override", uri_override) + + @property + @pulumi.getter(name="headerOverrides") + def header_overrides(self) -> Optional[Sequence['outputs.QueueHttpTargetHeaderOverride']]: + """ + HTTP target headers. + This map contains the header field names and values. + Headers will be set when running the CreateTask and/or BufferTask. + These headers represent a subset of the headers that will be configured for the task's HTTP request. + Some HTTP request headers will be ignored or replaced. + Headers which can have multiple values (according to RFC2616) can be specified using comma-separated values. + The size of the headers must be less than 80KB. Queue-level headers to override headers of all the tasks in the queue. + Structure is documented below. + """ + return pulumi.get(self, "header_overrides") + + @property + @pulumi.getter(name="httpMethod") + def http_method(self) -> Optional[str]: + """ + The HTTP method to use for the request. + When specified, it overrides HttpRequest for the task. + Note that if the value is set to GET the body of the task will be ignored at execution time. + Possible values are: `HTTP_METHOD_UNSPECIFIED`, `POST`, `GET`, `HEAD`, `PUT`, `DELETE`, `PATCH`, `OPTIONS`. + """ + return pulumi.get(self, "http_method") + + @property + @pulumi.getter(name="oauthToken") + def oauth_token(self) -> Optional['outputs.QueueHttpTargetOauthToken']: + """ + If specified, an OAuth token is generated and attached as the Authorization header in the HTTP request. + This type of authorization should generally be used only when calling Google APIs hosted on *.googleapis.com. + Note that both the service account email and the scope MUST be specified when using the queue-level authorization override. + Structure is documented below. + """ + return pulumi.get(self, "oauth_token") + + @property + @pulumi.getter(name="oidcToken") + def oidc_token(self) -> Optional['outputs.QueueHttpTargetOidcToken']: + """ + If specified, an OIDC token is generated and attached as an Authorization header in the HTTP request. + This type of authorization can be used for many scenarios, including calling Cloud Run, or endpoints where you intend to validate the token yourself. + Note that both the service account email and the audience MUST be specified when using the queue-level authorization override. 
+ Structure is documented below. + """ + return pulumi.get(self, "oidc_token") + + @property + @pulumi.getter(name="uriOverride") + def uri_override(self) -> Optional['outputs.QueueHttpTargetUriOverride']: + """ + URI override. + When specified, overrides the execution URI for all the tasks in the queue. + Structure is documented below. + """ + return pulumi.get(self, "uri_override") + + +@pulumi.output_type +class QueueHttpTargetHeaderOverride(dict): + def __init__(__self__, *, + header: 'outputs.QueueHttpTargetHeaderOverrideHeader'): + """ + :param 'QueueHttpTargetHeaderOverrideHeaderArgs' header: Header embodying a key and a value. + Structure is documented below. + """ + pulumi.set(__self__, "header", header) + + @property + @pulumi.getter + def header(self) -> 'outputs.QueueHttpTargetHeaderOverrideHeader': + """ + Header embodying a key and a value. + Structure is documented below. + """ + return pulumi.get(self, "header") + + +@pulumi.output_type +class QueueHttpTargetHeaderOverrideHeader(dict): + def __init__(__self__, *, + key: str, + value: str): + """ + :param str key: The Key of the header. + :param str value: The Value of the header. + """ + pulumi.set(__self__, "key", key) + pulumi.set(__self__, "value", value) + + @property + @pulumi.getter + def key(self) -> str: + """ + The Key of the header. + """ + return pulumi.get(self, "key") + + @property + @pulumi.getter + def value(self) -> str: + """ + The Value of the header. + """ + return pulumi.get(self, "value") + + +@pulumi.output_type +class QueueHttpTargetOauthToken(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "serviceAccountEmail": + suggest = "service_account_email" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in QueueHttpTargetOauthToken. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + QueueHttpTargetOauthToken.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + QueueHttpTargetOauthToken.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + service_account_email: str, + scope: Optional[str] = None): + """ + :param str service_account_email: Service account email to be used for generating OAuth token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + :param str scope: OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. + """ + pulumi.set(__self__, "service_account_email", service_account_email) + if scope is not None: + pulumi.set(__self__, "scope", scope) + + @property + @pulumi.getter(name="serviceAccountEmail") + def service_account_email(self) -> str: + """ + Service account email to be used for generating OAuth token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + """ + return pulumi.get(self, "service_account_email") + + @property + @pulumi.getter + def scope(self) -> Optional[str]: + """ + OAuth scope to be used for generating OAuth access token. + If not specified, "https://www.googleapis.com/auth/cloud-platform" will be used. 
+ """ + return pulumi.get(self, "scope") + + +@pulumi.output_type +class QueueHttpTargetOidcToken(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "serviceAccountEmail": + suggest = "service_account_email" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in QueueHttpTargetOidcToken. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + QueueHttpTargetOidcToken.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + QueueHttpTargetOidcToken.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + service_account_email: str, + audience: Optional[str] = None): + """ + :param str service_account_email: Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + :param str audience: Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + """ + pulumi.set(__self__, "service_account_email", service_account_email) + if audience is not None: + pulumi.set(__self__, "audience", audience) + + @property + @pulumi.getter(name="serviceAccountEmail") + def service_account_email(self) -> str: + """ + Service account email to be used for generating OIDC token. + The service account must be within the same project as the queue. + The caller must have iam.serviceAccounts.actAs permission for the service account. + """ + return pulumi.get(self, "service_account_email") + + @property + @pulumi.getter + def audience(self) -> Optional[str]: + """ + Audience to be used when generating OIDC token. If not specified, the URI specified in target will be used. + """ + return pulumi.get(self, "audience") + + +@pulumi.output_type +class QueueHttpTargetUriOverride(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "pathOverride": + suggest = "path_override" + elif key == "queryOverride": + suggest = "query_override" + elif key == "uriOverrideEnforceMode": + suggest = "uri_override_enforce_mode" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in QueueHttpTargetUriOverride. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + QueueHttpTargetUriOverride.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + QueueHttpTargetUriOverride.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + host: Optional[str] = None, + path_override: Optional['outputs.QueueHttpTargetUriOverridePathOverride'] = None, + port: Optional[str] = None, + query_override: Optional['outputs.QueueHttpTargetUriOverrideQueryOverride'] = None, + scheme: Optional[str] = None, + uri_override_enforce_mode: Optional[str] = None): + """ + :param str host: Host override. + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + :param 'QueueHttpTargetUriOverridePathOverrideArgs' path_override: URI path. + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + Structure is documented below. 
+ :param str port: Port override. + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + :param 'QueueHttpTargetUriOverrideQueryOverrideArgs' query_override: URI query. + When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + Structure is documented below. + :param str scheme: Scheme override. + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + Possible values are: `HTTP`, `HTTPS`. + :param str uri_override_enforce_mode: URI Override Enforce Mode + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. + Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + """ + if host is not None: + pulumi.set(__self__, "host", host) + if path_override is not None: + pulumi.set(__self__, "path_override", path_override) + if port is not None: + pulumi.set(__self__, "port", port) + if query_override is not None: + pulumi.set(__self__, "query_override", query_override) + if scheme is not None: + pulumi.set(__self__, "scheme", scheme) + if uri_override_enforce_mode is not None: + pulumi.set(__self__, "uri_override_enforce_mode", uri_override_enforce_mode) + + @property + @pulumi.getter + def host(self) -> Optional[str]: + """ + Host override. + When specified, replaces the host part of the task URL. + For example, if the task URL is "https://www.google.com", and host value + is set to "example.net", the overridden URI will be changed to "https://example.net". + Host value cannot be an empty string (INVALID_ARGUMENT). + """ + return pulumi.get(self, "host") + + @property + @pulumi.getter(name="pathOverride") + def path_override(self) -> Optional['outputs.QueueHttpTargetUriOverridePathOverride']: + """ + URI path. + When specified, replaces the existing path of the task URL. + Setting the path value to an empty string clears the URI path segment. + Structure is documented below. + """ + return pulumi.get(self, "path_override") + + @property + @pulumi.getter + def port(self) -> Optional[str]: + """ + Port override. + When specified, replaces the port part of the task URI. + For instance, for a URI http://www.google.com/foo and port=123, the overridden URI becomes http://www.google.com:123/foo. + Note that the port value must be a positive integer. + Setting the port to 0 (Zero) clears the URI port. + """ + return pulumi.get(self, "port") + + @property + @pulumi.getter(name="queryOverride") + def query_override(self) -> Optional['outputs.QueueHttpTargetUriOverrideQueryOverride']: + """ + URI query. + When specified, replaces the query part of the task URI. Setting the query value to an empty string clears the URI query segment. + Structure is documented below. + """ + return pulumi.get(self, "query_override") + + @property + @pulumi.getter + def scheme(self) -> Optional[str]: + """ + Scheme override. + When specified, the task URI scheme is replaced by the provided value (HTTP or HTTPS). + Possible values are: `HTTP`, `HTTPS`. + """ + return pulumi.get(self, "scheme") + + @property + @pulumi.getter(name="uriOverrideEnforceMode") + def uri_override_enforce_mode(self) -> Optional[str]: + """ + URI Override Enforce Mode + When specified, determines the Target UriOverride mode. If not specified, it defaults to ALWAYS. 
+ Possible values are: `ALWAYS`, `IF_NOT_EXISTS`. + """ + return pulumi.get(self, "uri_override_enforce_mode") + + +@pulumi.output_type +class QueueHttpTargetUriOverridePathOverride(dict): + def __init__(__self__, *, + path: Optional[str] = None): + """ + :param str path: The URI path (e.g., /users/1234). Default is an empty string. + """ + if path is not None: + pulumi.set(__self__, "path", path) + + @property + @pulumi.getter + def path(self) -> Optional[str]: + """ + The URI path (e.g., /users/1234). Default is an empty string. + """ + return pulumi.get(self, "path") + + +@pulumi.output_type +class QueueHttpTargetUriOverrideQueryOverride(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "queryParams": + suggest = "query_params" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in QueueHttpTargetUriOverrideQueryOverride. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + QueueHttpTargetUriOverrideQueryOverride.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + QueueHttpTargetUriOverrideQueryOverride.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + query_params: Optional[str] = None): + """ + :param str query_params: The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + """ + if query_params is not None: + pulumi.set(__self__, "query_params", query_params) + + @property + @pulumi.getter(name="queryParams") + def query_params(self) -> Optional[str]: + """ + The query parameters (e.g., qparam1=123&qparam2=456). Default is an empty string. + """ + return pulumi.get(self, "query_params") + + @pulumi.output_type class QueueIamBindingCondition(dict): def __init__(__self__, *, diff --git a/sdk/python/pulumi_gcp/cloudtasks/queue.py b/sdk/python/pulumi_gcp/cloudtasks/queue.py index 65ec1f89d8..647a3fad71 100644 --- a/sdk/python/pulumi_gcp/cloudtasks/queue.py +++ b/sdk/python/pulumi_gcp/cloudtasks/queue.py @@ -23,6 +23,7 @@ class QueueArgs: def __init__(__self__, *, location: pulumi.Input[str], app_engine_routing_override: Optional[pulumi.Input['QueueAppEngineRoutingOverrideArgs']] = None, + http_target: Optional[pulumi.Input['QueueHttpTargetArgs']] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, rate_limits: Optional[pulumi.Input['QueueRateLimitsArgs']] = None, @@ -37,6 +38,8 @@ def __init__(__self__, *, :param pulumi.Input['QueueAppEngineRoutingOverrideArgs'] app_engine_routing_override: Overrides for task-level appEngineRouting. These settings apply only to App Engine tasks in this queue Structure is documented below. + :param pulumi.Input['QueueHttpTargetArgs'] http_target: Modifies HTTP target for HTTP tasks. + Structure is documented below. :param pulumi.Input[str] name: The queue name. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. 
@@ -56,6 +59,8 @@ def __init__(__self__, *, pulumi.set(__self__, "location", location) if app_engine_routing_override is not None: pulumi.set(__self__, "app_engine_routing_override", app_engine_routing_override) + if http_target is not None: + pulumi.set(__self__, "http_target", http_target) if name is not None: pulumi.set(__self__, "name", name) if project is not None: @@ -96,6 +101,19 @@ def app_engine_routing_override(self) -> Optional[pulumi.Input['QueueAppEngineRo def app_engine_routing_override(self, value: Optional[pulumi.Input['QueueAppEngineRoutingOverrideArgs']]): pulumi.set(self, "app_engine_routing_override", value) + @property + @pulumi.getter(name="httpTarget") + def http_target(self) -> Optional[pulumi.Input['QueueHttpTargetArgs']]: + """ + Modifies HTTP target for HTTP tasks. + Structure is documented below. + """ + return pulumi.get(self, "http_target") + + @http_target.setter + def http_target(self, value: Optional[pulumi.Input['QueueHttpTargetArgs']]): + pulumi.set(self, "http_target", value) + @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: @@ -171,6 +189,7 @@ def stackdriver_logging_config(self, value: Optional[pulumi.Input['QueueStackdri class _QueueState: def __init__(__self__, *, app_engine_routing_override: Optional[pulumi.Input['QueueAppEngineRoutingOverrideArgs']] = None, + http_target: Optional[pulumi.Input['QueueHttpTargetArgs']] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, @@ -182,6 +201,8 @@ def __init__(__self__, *, :param pulumi.Input['QueueAppEngineRoutingOverrideArgs'] app_engine_routing_override: Overrides for task-level appEngineRouting. These settings apply only to App Engine tasks in this queue Structure is documented below. + :param pulumi.Input['QueueHttpTargetArgs'] http_target: Modifies HTTP target for HTTP tasks. + Structure is documented below. :param pulumi.Input[str] location: The location of the queue @@ -204,6 +225,8 @@ def __init__(__self__, *, """ if app_engine_routing_override is not None: pulumi.set(__self__, "app_engine_routing_override", app_engine_routing_override) + if http_target is not None: + pulumi.set(__self__, "http_target", http_target) if location is not None: pulumi.set(__self__, "location", location) if name is not None: @@ -231,6 +254,19 @@ def app_engine_routing_override(self) -> Optional[pulumi.Input['QueueAppEngineRo def app_engine_routing_override(self, value: Optional[pulumi.Input['QueueAppEngineRoutingOverrideArgs']]): pulumi.set(self, "app_engine_routing_override", value) + @property + @pulumi.getter(name="httpTarget") + def http_target(self) -> Optional[pulumi.Input['QueueHttpTargetArgs']]: + """ + Modifies HTTP target for HTTP tasks. + Structure is documented below. 
+ """ + return pulumi.get(self, "http_target") + + @http_target.setter + def http_target(self, value: Optional[pulumi.Input['QueueHttpTargetArgs']]): + pulumi.set(self, "http_target", value) + @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: @@ -323,6 +359,7 @@ def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, app_engine_routing_override: Optional[pulumi.Input[Union['QueueAppEngineRoutingOverrideArgs', 'QueueAppEngineRoutingOverrideArgsDict']]] = None, + http_target: Optional[pulumi.Input[Union['QueueHttpTargetArgs', 'QueueHttpTargetArgsDict']]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, @@ -374,6 +411,98 @@ def __init__(__self__, "sampling_ratio": 0.9, }) ``` + ### Cloud Tasks Queue Http Target Oidc + + ```python + import pulumi + import pulumi_gcp as gcp + + oidc_service_account = gcp.serviceaccount.Account("oidc_service_account", + account_id="example-oidc", + display_name="Tasks Queue OIDC Service Account") + http_target_oidc = gcp.cloudtasks.Queue("http_target_oidc", + name="cloud-tasks-queue-http-target-oidc", + location="us-central1", + http_target={ + "http_method": "POST", + "uri_override": { + "scheme": "HTTPS", + "host": "oidc.example.com", + "port": "8443", + "path_override": { + "path": "/users/1234", + }, + "query_override": { + "query_params": "qparam1=123&qparam2=456", + }, + "uri_override_enforce_mode": "IF_NOT_EXISTS", + }, + "header_overrides": [ + { + "header": { + "key": "AddSomethingElse", + "value": "MyOtherValue", + }, + }, + { + "header": { + "key": "AddMe", + "value": "MyValue", + }, + }, + ], + "oidc_token": { + "service_account_email": oidc_service_account.email, + "audience": "https://oidc.example.com", + }, + }) + ``` + ### Cloud Tasks Queue Http Target Oauth + + ```python + import pulumi + import pulumi_gcp as gcp + + oauth_service_account = gcp.serviceaccount.Account("oauth_service_account", + account_id="example-oauth", + display_name="Tasks Queue OAuth Service Account") + http_target_oauth = gcp.cloudtasks.Queue("http_target_oauth", + name="cloud-tasks-queue-http-target-oauth", + location="us-central1", + http_target={ + "http_method": "POST", + "uri_override": { + "scheme": "HTTPS", + "host": "oauth.example.com", + "port": "8443", + "path_override": { + "path": "/users/1234", + }, + "query_override": { + "query_params": "qparam1=123&qparam2=456", + }, + "uri_override_enforce_mode": "IF_NOT_EXISTS", + }, + "header_overrides": [ + { + "header": { + "key": "AddSomethingElse", + "value": "MyOtherValue", + }, + }, + { + "header": { + "key": "AddMe", + "value": "MyValue", + }, + }, + ], + "oauth_token": { + "service_account_email": oauth_service_account.email, + "scope": "openid https://www.googleapis.com/auth/userinfo.email", + }, + }) + ``` ## Import @@ -404,6 +533,8 @@ def __init__(__self__, :param pulumi.Input[Union['QueueAppEngineRoutingOverrideArgs', 'QueueAppEngineRoutingOverrideArgsDict']] app_engine_routing_override: Overrides for task-level appEngineRouting. These settings apply only to App Engine tasks in this queue Structure is documented below. + :param pulumi.Input[Union['QueueHttpTargetArgs', 'QueueHttpTargetArgsDict']] http_target: Modifies HTTP target for HTTP tasks. + Structure is documented below. 
:param pulumi.Input[str] location: The location of the queue @@ -474,6 +605,98 @@ def __init__(__self__, "sampling_ratio": 0.9, }) ``` + ### Cloud Tasks Queue Http Target Oidc + + ```python + import pulumi + import pulumi_gcp as gcp + + oidc_service_account = gcp.serviceaccount.Account("oidc_service_account", + account_id="example-oidc", + display_name="Tasks Queue OIDC Service Account") + http_target_oidc = gcp.cloudtasks.Queue("http_target_oidc", + name="cloud-tasks-queue-http-target-oidc", + location="us-central1", + http_target={ + "http_method": "POST", + "uri_override": { + "scheme": "HTTPS", + "host": "oidc.example.com", + "port": "8443", + "path_override": { + "path": "/users/1234", + }, + "query_override": { + "query_params": "qparam1=123&qparam2=456", + }, + "uri_override_enforce_mode": "IF_NOT_EXISTS", + }, + "header_overrides": [ + { + "header": { + "key": "AddSomethingElse", + "value": "MyOtherValue", + }, + }, + { + "header": { + "key": "AddMe", + "value": "MyValue", + }, + }, + ], + "oidc_token": { + "service_account_email": oidc_service_account.email, + "audience": "https://oidc.example.com", + }, + }) + ``` + ### Cloud Tasks Queue Http Target Oauth + + ```python + import pulumi + import pulumi_gcp as gcp + + oauth_service_account = gcp.serviceaccount.Account("oauth_service_account", + account_id="example-oauth", + display_name="Tasks Queue OAuth Service Account") + http_target_oauth = gcp.cloudtasks.Queue("http_target_oauth", + name="cloud-tasks-queue-http-target-oauth", + location="us-central1", + http_target={ + "http_method": "POST", + "uri_override": { + "scheme": "HTTPS", + "host": "oauth.example.com", + "port": "8443", + "path_override": { + "path": "/users/1234", + }, + "query_override": { + "query_params": "qparam1=123&qparam2=456", + }, + "uri_override_enforce_mode": "IF_NOT_EXISTS", + }, + "header_overrides": [ + { + "header": { + "key": "AddSomethingElse", + "value": "MyOtherValue", + }, + }, + { + "header": { + "key": "AddMe", + "value": "MyValue", + }, + }, + ], + "oauth_token": { + "service_account_email": oauth_service_account.email, + "scope": "openid https://www.googleapis.com/auth/userinfo.email", + }, + }) + ``` ## Import @@ -515,6 +738,7 @@ def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, app_engine_routing_override: Optional[pulumi.Input[Union['QueueAppEngineRoutingOverrideArgs', 'QueueAppEngineRoutingOverrideArgsDict']]] = None, + http_target: Optional[pulumi.Input[Union['QueueHttpTargetArgs', 'QueueHttpTargetArgsDict']]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, @@ -531,6 +755,7 @@ def _internal_init(__self__, __props__ = QueueArgs.__new__(QueueArgs) __props__.__dict__["app_engine_routing_override"] = app_engine_routing_override + __props__.__dict__["http_target"] = http_target if location is None and not opts.urn: raise TypeError("Missing required property 'location'") __props__.__dict__["location"] = location @@ -550,6 +775,7 @@ def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, app_engine_routing_override: Optional[pulumi.Input[Union['QueueAppEngineRoutingOverrideArgs', 'QueueAppEngineRoutingOverrideArgsDict']]] = None, + http_target: Optional[pulumi.Input[Union['QueueHttpTargetArgs', 'QueueHttpTargetArgsDict']]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, @@ 
-566,6 +792,8 @@ def get(resource_name: str, :param pulumi.Input[Union['QueueAppEngineRoutingOverrideArgs', 'QueueAppEngineRoutingOverrideArgsDict']] app_engine_routing_override: Overrides for task-level appEngineRouting. These settings apply only to App Engine tasks in this queue Structure is documented below. + :param pulumi.Input[Union['QueueHttpTargetArgs', 'QueueHttpTargetArgsDict']] http_target: Modifies HTTP target for HTTP tasks. + Structure is documented below. :param pulumi.Input[str] location: The location of the queue @@ -591,6 +819,7 @@ def get(resource_name: str, __props__ = _QueueState.__new__(_QueueState) __props__.__dict__["app_engine_routing_override"] = app_engine_routing_override + __props__.__dict__["http_target"] = http_target __props__.__dict__["location"] = location __props__.__dict__["name"] = name __props__.__dict__["project"] = project @@ -609,6 +838,15 @@ def app_engine_routing_override(self) -> pulumi.Output[Optional['outputs.QueueAp """ return pulumi.get(self, "app_engine_routing_override") + @property + @pulumi.getter(name="httpTarget") + def http_target(self) -> pulumi.Output[Optional['outputs.QueueHttpTarget']]: + """ + Modifies HTTP target for HTTP tasks. + Structure is documented below. + """ + return pulumi.get(self, "http_target") + @property @pulumi.getter def location(self) -> pulumi.Output[str]: diff --git a/sdk/python/pulumi_gcp/compute/_inputs.py b/sdk/python/pulumi_gcp/compute/_inputs.py index 3bcd2e4166..668bc4b757 100644 --- a/sdk/python/pulumi_gcp/compute/_inputs.py +++ b/sdk/python/pulumi_gcp/compute/_inputs.py @@ -407,6 +407,8 @@ 'NodeGroupShareSettingsArgsDict', 'NodeGroupShareSettingsProjectMapArgs', 'NodeGroupShareSettingsProjectMapArgsDict', + 'NodeTemplateAcceleratorArgs', + 'NodeTemplateAcceleratorArgsDict', 'NodeTemplateNodeTypeFlexibilityArgs', 'NodeTemplateNodeTypeFlexibilityArgsDict', 'NodeTemplateServerBindingArgs', @@ -5952,7 +5954,7 @@ class FirewallAllowArgsDict(TypedDict): is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ elif False: @@ -5972,7 +5974,7 @@ def __init__(__self__, *, is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ pulumi.set(__self__, "protocol", protocol) @@ -6002,7 +6004,7 @@ def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ return pulumi.get(self, "ports") @@ -6027,7 +6029,7 @@ class FirewallDenyArgsDict(TypedDict): is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ elif False: @@ -6047,7 +6049,7 @@ def __init__(__self__, *, is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. 
If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ pulumi.set(__self__, "protocol", protocol) @@ -6077,7 +6079,7 @@ def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ return pulumi.get(self, "ports") @@ -8568,6 +8570,10 @@ class InstanceBootDiskArgsDict(TypedDict): alongside the new instance. Either `initialize_params` or `source` must be set. Structure is documented below. """ + interface: NotRequired[pulumi.Input[str]] + """ + The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + """ kms_key_self_link: NotRequired[pulumi.Input[str]] """ The self_link of the encryption key that is @@ -8596,6 +8602,7 @@ def __init__(__self__, *, disk_encryption_key_raw: Optional[pulumi.Input[str]] = None, disk_encryption_key_sha256: Optional[pulumi.Input[str]] = None, initialize_params: Optional[pulumi.Input['InstanceBootDiskInitializeParamsArgs']] = None, + interface: Optional[pulumi.Input[str]] = None, kms_key_self_link: Optional[pulumi.Input[str]] = None, mode: Optional[pulumi.Input[str]] = None, source: Optional[pulumi.Input[str]] = None): @@ -8615,6 +8622,7 @@ def __init__(__self__, *, :param pulumi.Input['InstanceBootDiskInitializeParamsArgs'] initialize_params: Parameters for a new disk that will be created alongside the new instance. Either `initialize_params` or `source` must be set. Structure is documented below. + :param pulumi.Input[str] interface: The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) :param pulumi.Input[str] kms_key_self_link: The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link` and `disk_encryption_key_raw` may be set. @@ -8634,6 +8642,8 @@ def __init__(__self__, *, pulumi.set(__self__, "disk_encryption_key_sha256", disk_encryption_key_sha256) if initialize_params is not None: pulumi.set(__self__, "initialize_params", initialize_params) + if interface is not None: + pulumi.set(__self__, "interface", interface) if kms_key_self_link is not None: pulumi.set(__self__, "kms_key_self_link", kms_key_self_link) if mode is not None: @@ -8711,6 +8721,18 @@ def initialize_params(self) -> Optional[pulumi.Input['InstanceBootDiskInitialize def initialize_params(self, value: Optional[pulumi.Input['InstanceBootDiskInitializeParamsArgs']]): pulumi.set(self, "initialize_params", value) + @property + @pulumi.getter + def interface(self) -> Optional[pulumi.Input[str]]: + """ + The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) 
+ """ + return pulumi.get(self, "interface") + + @interface.setter + def interface(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "interface", value) + @property @pulumi.getter(name="kmsKeySelfLink") def kms_key_self_link(self) -> Optional[pulumi.Input[str]]: @@ -9300,6 +9322,10 @@ class InstanceFromMachineImageBootDiskArgsDict(TypedDict): """ Parameters with which a disk was created alongside the instance. """ + interface: NotRequired[pulumi.Input[str]] + """ + The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + """ kms_key_self_link: NotRequired[pulumi.Input[str]] """ The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. @@ -9323,6 +9349,7 @@ def __init__(__self__, *, disk_encryption_key_raw: Optional[pulumi.Input[str]] = None, disk_encryption_key_sha256: Optional[pulumi.Input[str]] = None, initialize_params: Optional[pulumi.Input['InstanceFromMachineImageBootDiskInitializeParamsArgs']] = None, + interface: Optional[pulumi.Input[str]] = None, kms_key_self_link: Optional[pulumi.Input[str]] = None, mode: Optional[pulumi.Input[str]] = None, source: Optional[pulumi.Input[str]] = None): @@ -9332,6 +9359,7 @@ def __init__(__self__, *, :param pulumi.Input[str] disk_encryption_key_raw: A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. :param pulumi.Input[str] disk_encryption_key_sha256: The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. :param pulumi.Input['InstanceFromMachineImageBootDiskInitializeParamsArgs'] initialize_params: Parameters with which a disk was created alongside the instance. + :param pulumi.Input[str] interface: The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) :param pulumi.Input[str] kms_key_self_link: The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. :param pulumi.Input[str] mode: Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". :param pulumi.Input[str] source: The name or self_link of the disk attached to this instance. @@ -9346,6 +9374,8 @@ def __init__(__self__, *, pulumi.set(__self__, "disk_encryption_key_sha256", disk_encryption_key_sha256) if initialize_params is not None: pulumi.set(__self__, "initialize_params", initialize_params) + if interface is not None: + pulumi.set(__self__, "interface", interface) if kms_key_self_link is not None: pulumi.set(__self__, "kms_key_self_link", kms_key_self_link) if mode is not None: @@ -9413,6 +9443,18 @@ def initialize_params(self) -> Optional[pulumi.Input['InstanceFromMachineImageBo def initialize_params(self, value: Optional[pulumi.Input['InstanceFromMachineImageBootDiskInitializeParamsArgs']]): pulumi.set(self, "initialize_params", value) + @property + @pulumi.getter + def interface(self) -> Optional[pulumi.Input[str]]: + """ + The disk interface used for attaching this disk. One of SCSI or NVME. 
(This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + """ + return pulumi.get(self, "interface") + + @interface.setter + def interface(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "interface", value) + @property @pulumi.getter(name="kmsKeySelfLink") def kms_key_self_link(self) -> Optional[pulumi.Input[str]]: @@ -11386,6 +11428,10 @@ class InstanceFromTemplateBootDiskArgsDict(TypedDict): """ Parameters with which a disk was created alongside the instance. """ + interface: NotRequired[pulumi.Input[str]] + """ + The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + """ kms_key_self_link: NotRequired[pulumi.Input[str]] """ The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. @@ -11409,6 +11455,7 @@ def __init__(__self__, *, disk_encryption_key_raw: Optional[pulumi.Input[str]] = None, disk_encryption_key_sha256: Optional[pulumi.Input[str]] = None, initialize_params: Optional[pulumi.Input['InstanceFromTemplateBootDiskInitializeParamsArgs']] = None, + interface: Optional[pulumi.Input[str]] = None, kms_key_self_link: Optional[pulumi.Input[str]] = None, mode: Optional[pulumi.Input[str]] = None, source: Optional[pulumi.Input[str]] = None): @@ -11418,6 +11465,7 @@ def __init__(__self__, *, :param pulumi.Input[str] disk_encryption_key_raw: A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. :param pulumi.Input[str] disk_encryption_key_sha256: The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. :param pulumi.Input['InstanceFromTemplateBootDiskInitializeParamsArgs'] initialize_params: Parameters with which a disk was created alongside the instance. + :param pulumi.Input[str] interface: The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) :param pulumi.Input[str] kms_key_self_link: The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. :param pulumi.Input[str] mode: Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". :param pulumi.Input[str] source: The name or self_link of the disk attached to this instance. @@ -11432,6 +11480,8 @@ def __init__(__self__, *, pulumi.set(__self__, "disk_encryption_key_sha256", disk_encryption_key_sha256) if initialize_params is not None: pulumi.set(__self__, "initialize_params", initialize_params) + if interface is not None: + pulumi.set(__self__, "interface", interface) if kms_key_self_link is not None: pulumi.set(__self__, "kms_key_self_link", kms_key_self_link) if mode is not None: @@ -11499,6 +11549,18 @@ def initialize_params(self) -> Optional[pulumi.Input['InstanceFromTemplateBootDi def initialize_params(self, value: Optional[pulumi.Input['InstanceFromTemplateBootDiskInitializeParamsArgs']]): pulumi.set(self, "initialize_params", value) + @property + @pulumi.getter + def interface(self) -> Optional[pulumi.Input[str]]: + """ + The disk interface used for attaching this disk. 
One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + """ + return pulumi.get(self, "interface") + + @interface.setter + def interface(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "interface", value) + @property @pulumi.getter(name="kmsKeySelfLink") def kms_key_self_link(self) -> Optional[pulumi.Input[str]]: @@ -14649,7 +14711,7 @@ class InstanceNetworkInterfaceArgsDict(TypedDict): subnetwork_project: NotRequired[pulumi.Input[str]] """ The project in which the subnetwork belongs. - If the `subnetwork` is a self_link, this field is ignored in favor of the project + If the `subnetwork` is a self_link, this field is set to the project defined in the subnetwork self_link. If the `subnetwork` is a name and this field is not provided, the provider project is used. """ @@ -14707,7 +14769,7 @@ def __init__(__self__, *, network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. :param pulumi.Input[str] subnetwork_project: The project in which the subnetwork belongs. - If the `subnetwork` is a self_link, this field is ignored in favor of the project + If the `subnetwork` is a self_link, this field is set to the project defined in the subnetwork self_link. If the `subnetwork` is a name and this field is not provided, the provider project is used. """ @@ -14944,7 +15006,7 @@ def subnetwork(self, value: Optional[pulumi.Input[str]]): def subnetwork_project(self) -> Optional[pulumi.Input[str]]: """ The project in which the subnetwork belongs. - If the `subnetwork` is a self_link, this field is ignored in favor of the project + If the `subnetwork` is a self_link, this field is set to the project defined in the subnetwork self_link. If the `subnetwork` is a name and this field is not provided, the provider project is used. """ @@ -20142,6 +20204,64 @@ def project_id(self, value: pulumi.Input[str]): pulumi.set(self, "project_id", value) +if not MYPY: + class NodeTemplateAcceleratorArgsDict(TypedDict): + accelerator_count: NotRequired[pulumi.Input[int]] + """ + The number of the guest accelerator cards exposed to this + node template. + """ + accelerator_type: NotRequired[pulumi.Input[str]] + """ + Full or partial URL of the accelerator type resource to expose + to this node template. + """ +elif False: + NodeTemplateAcceleratorArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class NodeTemplateAcceleratorArgs: + def __init__(__self__, *, + accelerator_count: Optional[pulumi.Input[int]] = None, + accelerator_type: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[int] accelerator_count: The number of the guest accelerator cards exposed to this + node template. + :param pulumi.Input[str] accelerator_type: Full or partial URL of the accelerator type resource to expose + to this node template. + """ + if accelerator_count is not None: + pulumi.set(__self__, "accelerator_count", accelerator_count) + if accelerator_type is not None: + pulumi.set(__self__, "accelerator_type", accelerator_type) + + @property + @pulumi.getter(name="acceleratorCount") + def accelerator_count(self) -> Optional[pulumi.Input[int]]: + """ + The number of the guest accelerator cards exposed to this + node template. 
+ """ + return pulumi.get(self, "accelerator_count") + + @accelerator_count.setter + def accelerator_count(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "accelerator_count", value) + + @property + @pulumi.getter(name="acceleratorType") + def accelerator_type(self) -> Optional[pulumi.Input[str]]: + """ + Full or partial URL of the accelerator type resource to expose + to this node template. + """ + return pulumi.get(self, "accelerator_type") + + @accelerator_type.setter + def accelerator_type(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "accelerator_type", value) + + if not MYPY: class NodeTemplateNodeTypeFlexibilityArgsDict(TypedDict): cpus: NotRequired[pulumi.Input[str]] diff --git a/sdk/python/pulumi_gcp/compute/get_instance.py b/sdk/python/pulumi_gcp/compute/get_instance.py index d2fe28d5d0..608c91c0b4 100644 --- a/sdk/python/pulumi_gcp/compute/get_instance.py +++ b/sdk/python/pulumi_gcp/compute/get_instance.py @@ -203,7 +203,7 @@ def cpu_platform(self) -> str: @pulumi.getter(name="currentStatus") def current_status(self) -> str: """ - The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). """ return pulumi.get(self, "current_status") diff --git a/sdk/python/pulumi_gcp/compute/health_check.py b/sdk/python/pulumi_gcp/compute/health_check.py index f925176596..5693dac1fe 100644 --- a/sdk/python/pulumi_gcp/compute/health_check.py +++ b/sdk/python/pulumi_gcp/compute/health_check.py @@ -969,6 +969,63 @@ def __init__(__self__, "enable": True, }) ``` + ### Compute Health Check Http Source Regions + + ```python + import pulumi + import pulumi_gcp as gcp + + http_health_check_with_source_regions = gcp.compute.HealthCheck("http-health-check-with-source-regions", + name="http-health-check", + check_interval_sec=30, + http_health_check={ + "port": 80, + "port_specification": "USE_FIXED_PORT", + }, + source_regions=[ + "us-west1", + "us-central1", + "us-east5", + ]) + ``` + ### Compute Health Check Https Source Regions + + ```python + import pulumi + import pulumi_gcp as gcp + + https_health_check_with_source_regions = gcp.compute.HealthCheck("https-health-check-with-source-regions", + name="https-health-check", + check_interval_sec=30, + https_health_check={ + "port": 80, + "port_specification": "USE_FIXED_PORT", + }, + source_regions=[ + "us-west1", + "us-central1", + "us-east5", + ]) + ``` + ### Compute Health Check Tcp Source Regions + + ```python + import pulumi + import pulumi_gcp as gcp + + tcp_health_check_with_source_regions = gcp.compute.HealthCheck("tcp-health-check-with-source-regions", + name="tcp-health-check", + check_interval_sec=30, + tcp_health_check={ + "port": 80, + "port_specification": "USE_FIXED_PORT", + }, + source_regions=[ + "us-west1", + "us-central1", + "us-east5", + ]) + ``` ## Import @@ -1302,6 +1359,63 @@ def __init__(__self__, "enable": True, }) ``` + ### Compute Health Check Http Source Regions + + ```python + import pulumi + import pulumi_gcp as gcp + + 
http_health_check_with_source_regions = gcp.compute.HealthCheck("http-health-check-with-source-regions", + name="http-health-check", + check_interval_sec=30, + http_health_check={ + "port": 80, + "port_specification": "USE_FIXED_PORT", + }, + source_regions=[ + "us-west1", + "us-central1", + "us-east5", + ]) + ``` + ### Compute Health Check Https Source Regions + + ```python + import pulumi + import pulumi_gcp as gcp + + https_health_check_with_source_regions = gcp.compute.HealthCheck("https-health-check-with-source-regions", + name="https-health-check", + check_interval_sec=30, + https_health_check={ + "port": 80, + "port_specification": "USE_FIXED_PORT", + }, + source_regions=[ + "us-west1", + "us-central1", + "us-east5", + ]) + ``` + ### Compute Health Check Tcp Source Regions + + ```python + import pulumi + import pulumi_gcp as gcp + + tcp_health_check_with_source_regions = gcp.compute.HealthCheck("tcp-health-check-with-source-regions", + name="tcp-health-check", + check_interval_sec=30, + tcp_health_check={ + "port": 80, + "port_specification": "USE_FIXED_PORT", + }, + source_regions=[ + "us-west1", + "us-central1", + "us-east5", + ]) + ``` ## Import diff --git a/sdk/python/pulumi_gcp/compute/instance.py b/sdk/python/pulumi_gcp/compute/instance.py index 8eb61ac62a..c4fd1763bb 100644 --- a/sdk/python/pulumi_gcp/compute/instance.py +++ b/sdk/python/pulumi_gcp/compute/instance.py @@ -693,7 +693,7 @@ def __init__(__self__, *, This defaults to false. :param pulumi.Input['InstanceConfidentialInstanceConfigArgs'] confidential_instance_config: Enable [Confidential Mode](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) on this VM. Structure is documented below :param pulumi.Input[str] cpu_platform: The CPU platform used by this instance. - :param pulumi.Input[str] current_status: The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + :param pulumi.Input[str] current_status: The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). :param pulumi.Input[bool] deletion_protection: Enable deletion protection on this instance. Defaults to false. **Note:** you must disable deletion protection before removing the resource (e.g., via `pulumi destroy`), or the instance cannot be deleted and the provider run will not complete successfully. :param pulumi.Input[str] description: A brief description of this resource. @@ -955,7 +955,7 @@ def cpu_platform(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="currentStatus") def current_status(self) -> Optional[pulumi.Input[str]]: """ - The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + The current status of the instance. 
This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). """ return pulumi.get(self, "current_status") @@ -1488,6 +1488,47 @@ def __init__(__self__, }) ``` + ### Confidential Computing + + Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. + + ```python + import pulumi + import pulumi_gcp as gcp + + default = gcp.serviceaccount.Account("default", + account_id="my-custom-sa", + display_name="Custom SA for VM Instance") + confidential_instance = gcp.compute.Instance("confidential_instance", + network_interfaces=[{ + "access_configs": [{}], + "network": "default", + }], + name="my-confidential-instance", + zone="us-central1-a", + machine_type="n2d-standard-2", + min_cpu_platform="AMD Milan", + confidential_instance_config={ + "enable_confidential_compute": True, + "confidential_instance_type": "SEV", + }, + boot_disk={ + "initialize_params": { + "image": "ubuntu-os-cloud/ubuntu-2004-lts", + "labels": { + "my_label": "value", + }, + }, + }, + scratch_disks=[{ + "interface": "NVME", + }], + service_account={ + "email": default.email, + "scopes": ["cloud-platform"], + }) + ``` + ## Import Instances can be imported using any of these accepted formats: @@ -1659,6 +1700,47 @@ def __init__(__self__, }) ``` + ### Confidential Computing + + Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. + + ```python + import pulumi + import pulumi_gcp as gcp + + default = gcp.serviceaccount.Account("default", + account_id="my-custom-sa", + display_name="Custom SA for VM Instance") + confidential_instance = gcp.compute.Instance("confidential_instance", + network_interfaces=[{ + "access_configs": [{}], + "network": "default", + }], + name="my-confidential-instance", + zone="us-central1-a", + machine_type="n2d-standard-2", + min_cpu_platform="AMD Milan", + confidential_instance_config={ + "enable_confidential_compute": True, + "confidential_instance_type": "SEV", + }, + boot_disk={ + "initialize_params": { + "image": "ubuntu-os-cloud/ubuntu-2004-lts", + "labels": { + "my_label": "value", + }, + }, + }, + scratch_disks=[{ + "interface": "NVME", + }], + service_account={ + "email": default.email, + "scopes": ["cloud-platform"], + }) + ``` + ## Import Instances can be imported using any of these accepted formats: @@ -1854,7 +1936,7 @@ def get(resource_name: str, This defaults to false. :param pulumi.Input[Union['InstanceConfidentialInstanceConfigArgs', 'InstanceConfidentialInstanceConfigArgsDict']] confidential_instance_config: Enable [Confidential Mode](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) on this VM. Structure is documented below :param pulumi.Input[str] cpu_platform: The CPU platform used by this instance. - :param pulumi.Input[str] current_status: The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + :param pulumi.Input[str] current_status: The current status of the instance. 
This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). :param pulumi.Input[bool] deletion_protection: Enable deletion protection on this instance. Defaults to false. **Note:** you must disable deletion protection before removing the resource (e.g., via `pulumi destroy`), or the instance cannot be deleted and the provider run will not complete successfully. :param pulumi.Input[str] description: A brief description of this resource. @@ -2053,7 +2135,7 @@ def cpu_platform(self) -> pulumi.Output[str]: @pulumi.getter(name="currentStatus") def current_status(self) -> pulumi.Output[str]: """ - The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + The current status of the instance. This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle). """ return pulumi.get(self, "current_status") diff --git a/sdk/python/pulumi_gcp/compute/instance_template.py b/sdk/python/pulumi_gcp/compute/instance_template.py index 72564f8c41..87e85b8fee 100644 --- a/sdk/python/pulumi_gcp/compute/instance_template.py +++ b/sdk/python/pulumi_gcp/compute/instance_template.py @@ -1348,6 +1348,39 @@ def __init__(__self__, }) ``` + ### Confidential Computing + + Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. + + ```python + import pulumi + import pulumi_gcp as gcp + + default = gcp.serviceaccount.Account("default", + account_id="my-custom-sa", + display_name="Custom SA for VM Instance") + confidential_instance_template = gcp.compute.InstanceTemplate("confidential_instance_template", + network_interfaces=[{ + "access_configs": [{}], + "network": "default", + }], + name="my-confidential-instance-template", + region="us-central1", + machine_type="n2d-standard-2", + min_cpu_platform="AMD Milan", + confidential_instance_config={ + "enable_confidential_compute": True, + "confidential_instance_type": "SEV", + }, + disks=[{ + "source_image": "ubuntu-os-cloud/ubuntu-2004-lts", + }], + service_account={ + "email": default.email, + "scopes": ["cloud-platform"], + }) + ``` + ## Deploying the Latest Image A common way to use instance templates and managed instance groups is to deploy the @@ -1649,6 +1682,39 @@ def __init__(__self__, }) ``` + ### Confidential Computing + + Example with [Confidential Mode](https://cloud.google.com/confidential-computing/confidential-vm/docs/confidential-vm-overview) activated. 
+ + ```python + import pulumi + import pulumi_gcp as gcp + + default = gcp.serviceaccount.Account("default", + account_id="my-custom-sa", + display_name="Custom SA for VM Instance") + confidential_instance_template = gcp.compute.InstanceTemplate("confidential_instance_template", + network_interfaces=[{ + "access_configs": [{}], + "network": "default", + }], + name="my-confidential-instance-template", + region="us-central1", + machine_type="n2d-standard-2", + min_cpu_platform="AMD Milan", + confidential_instance_config={ + "enable_confidential_compute": True, + "confidential_instance_type": "SEV", + }, + disks=[{ + "source_image": "ubuntu-os-cloud/ubuntu-2004-lts", + }], + service_account={ + "email": default.email, + "scopes": ["cloud-platform"], + }) + ``` + ## Deploying the Latest Image A common way to use instance templates and managed instance groups is to deploy the diff --git a/sdk/python/pulumi_gcp/compute/interconnect.py b/sdk/python/pulumi_gcp/compute/interconnect.py index 663dfa7e0f..f8c65907e6 100644 --- a/sdk/python/pulumi_gcp/compute/interconnect.py +++ b/sdk/python/pulumi_gcp/compute/interconnect.py @@ -83,11 +83,12 @@ def __init__(__self__, *, If it is not provided, the provider project is used. :param pulumi.Input[str] remote_location: Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to. - :param pulumi.Input[Sequence[pulumi.Input[str]]] requested_features: interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + :param pulumi.Input[Sequence[pulumi.Input[str]]] requested_features: interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). - Each value may be one of: `MACSEC`. + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. + Each value may be one of: `MACSEC`, `IF_MACSEC`. """ pulumi.set(__self__, "customer_name", customer_name) pulumi.set(__self__, "interconnect_type", interconnect_type) @@ -318,11 +319,12 @@ def remote_location(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="requestedFeatures") def requested_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ - interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). - Each value may be one of: `MACSEC`. + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. + Each value may be one of: `MACSEC`, `IF_MACSEC`. """ return pulumi.get(self, "requested_features") @@ -443,11 +445,12 @@ def __init__(__self__, *, and default labels configured on the provider. :param pulumi.Input[str] remote_location: Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to. 
- :param pulumi.Input[Sequence[pulumi.Input[str]]] requested_features: interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + :param pulumi.Input[Sequence[pulumi.Input[str]]] requested_features: interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). - Each value may be one of: `MACSEC`. + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. + Each value may be one of: `MACSEC`, `IF_MACSEC`. :param pulumi.Input[int] requested_link_count: Target number of physical links in the link bundle, as requested by the customer. :param pulumi.Input[bool] satisfies_pzs: Reserved for future use. :param pulumi.Input[str] state: (Output) @@ -887,11 +890,12 @@ def remote_location(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="requestedFeatures") def requested_features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ - interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). - Each value may be one of: `MACSEC`. + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. + Each value may be one of: `MACSEC`, `IF_MACSEC`. """ return pulumi.get(self, "requested_features") @@ -1062,11 +1066,12 @@ def __init__(__self__, If it is not provided, the provider project is used. :param pulumi.Input[str] remote_location: Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to. - :param pulumi.Input[Sequence[pulumi.Input[str]]] requested_features: interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + :param pulumi.Input[Sequence[pulumi.Input[str]]] requested_features: interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). - Each value may be one of: `MACSEC`. + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. + Each value may be one of: `MACSEC`, `IF_MACSEC`. :param pulumi.Input[int] requested_link_count: Target number of physical links in the link bundle, as requested by the customer. """ ... @@ -1332,11 +1337,12 @@ def get(resource_name: str, and default labels configured on the provider. :param pulumi.Input[str] remote_location: Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to. 
- :param pulumi.Input[Sequence[pulumi.Input[str]]] requested_features: interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + :param pulumi.Input[Sequence[pulumi.Input[str]]] requested_features: interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). - Each value may be one of: `MACSEC`. + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. + Each value may be one of: `MACSEC`, `IF_MACSEC`. :param pulumi.Input[int] requested_link_count: Target number of physical links in the link bundle, as requested by the customer. :param pulumi.Input[bool] satisfies_pzs: Reserved for future use. :param pulumi.Input[str] state: (Output) @@ -1647,11 +1653,12 @@ def remote_location(self) -> pulumi.Output[Optional[str]]: @pulumi.getter(name="requestedFeatures") def requested_features(self) -> pulumi.Output[Optional[Sequence[str]]]: """ - interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + interconnects.list of features requested for this Interconnect connection. Options: IF_MACSEC ( If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if - available). - Each value may be one of: `MACSEC`. + available). Note that MACSEC is still technically allowed for compatibility reasons, but it + does not work with the API, and will be removed in an upcoming major version. + Each value may be one of: `MACSEC`, `IF_MACSEC`. """ return pulumi.get(self, "requested_features") diff --git a/sdk/python/pulumi_gcp/compute/node_template.py b/sdk/python/pulumi_gcp/compute/node_template.py index a44afa0953..76e0d8e7bf 100644 --- a/sdk/python/pulumi_gcp/compute/node_template.py +++ b/sdk/python/pulumi_gcp/compute/node_template.py @@ -21,6 +21,7 @@ @pulumi.input_type class NodeTemplateArgs: def __init__(__self__, *, + accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateAcceleratorArgs']]]] = None, cpu_overcommit_type: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -32,6 +33,9 @@ def __init__(__self__, *, server_binding: Optional[pulumi.Input['NodeTemplateServerBindingArgs']] = None): """ The set of arguments for constructing a NodeTemplate resource. + :param pulumi.Input[Sequence[pulumi.Input['NodeTemplateAcceleratorArgs']]] accelerators: List of the type and count of accelerator cards attached to the + node template + Structure is documented below. :param pulumi.Input[str] cpu_overcommit_type: CPU overcommit. Default value is `NONE`. Possible values are: `ENABLED`, `NONE`. @@ -54,6 +58,8 @@ def __init__(__self__, *, where the nodes should restart following a maintenance event. Structure is documented below. 
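In practice, the interconnect change above means new configurations should request MACsec capability with the `IF_MACSEC` value rather than the legacy `MACSEC`, which the docs now flag as non-functional against the API. A minimal sketch of how the field is consumed from this SDK; the project, location URL, customer name, and link settings below are placeholders, not values from this PR:

```python
import pulumi
import pulumi_gcp as gcp

# All identifiers below are placeholders; requested_features is the point of the sketch.
macsec_interconnect = gcp.compute.Interconnect("macsec-interconnect",
    name="example-macsec-interconnect",
    customer_name="example-customer",
    interconnect_type="DEDICATED",
    link_type="LINK_TYPE_ETHERNET_10G_LR",
    location="https://www.googleapis.com/compute/v1/projects/my-project/global/interconnectLocations/iad-zone1-1",
    requested_link_count=1,
    # IF_MACSEC requests MACsec-capable hardware ports; the legacy MACSEC value
    # is still accepted by the schema but is slated for removal.
    requested_features=["IF_MACSEC"])
```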
""" + if accelerators is not None: + pulumi.set(__self__, "accelerators", accelerators) if cpu_overcommit_type is not None: pulumi.set(__self__, "cpu_overcommit_type", cpu_overcommit_type) if description is not None: @@ -73,6 +79,20 @@ def __init__(__self__, *, if server_binding is not None: pulumi.set(__self__, "server_binding", server_binding) + @property + @pulumi.getter + def accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateAcceleratorArgs']]]]: + """ + List of the type and count of accelerator cards attached to the + node template + Structure is documented below. + """ + return pulumi.get(self, "accelerators") + + @accelerators.setter + def accelerators(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateAcceleratorArgs']]]]): + pulumi.set(self, "accelerators", value) + @property @pulumi.getter(name="cpuOvercommitType") def cpu_overcommit_type(self) -> Optional[pulumi.Input[str]]: @@ -197,6 +217,7 @@ def server_binding(self, value: Optional[pulumi.Input['NodeTemplateServerBinding @pulumi.input_type class _NodeTemplateState: def __init__(__self__, *, + accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateAcceleratorArgs']]]] = None, cpu_overcommit_type: Optional[pulumi.Input[str]] = None, creation_timestamp: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, @@ -210,6 +231,9 @@ def __init__(__self__, *, server_binding: Optional[pulumi.Input['NodeTemplateServerBindingArgs']] = None): """ Input properties used for looking up and filtering NodeTemplate resources. + :param pulumi.Input[Sequence[pulumi.Input['NodeTemplateAcceleratorArgs']]] accelerators: List of the type and count of accelerator cards attached to the + node template + Structure is documented below. :param pulumi.Input[str] cpu_overcommit_type: CPU overcommit. Default value is `NONE`. Possible values are: `ENABLED`, `NONE`. @@ -234,6 +258,8 @@ def __init__(__self__, *, where the nodes should restart following a maintenance event. Structure is documented below. """ + if accelerators is not None: + pulumi.set(__self__, "accelerators", accelerators) if cpu_overcommit_type is not None: pulumi.set(__self__, "cpu_overcommit_type", cpu_overcommit_type) if creation_timestamp is not None: @@ -257,6 +283,20 @@ def __init__(__self__, *, if server_binding is not None: pulumi.set(__self__, "server_binding", server_binding) + @property + @pulumi.getter + def accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateAcceleratorArgs']]]]: + """ + List of the type and count of accelerator cards attached to the + node template + Structure is documented below. 
+ """ + return pulumi.get(self, "accelerators") + + @accelerators.setter + def accelerators(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NodeTemplateAcceleratorArgs']]]]): + pulumi.set(self, "accelerators", value) + @property @pulumi.getter(name="cpuOvercommitType") def cpu_overcommit_type(self) -> Optional[pulumi.Input[str]]: @@ -407,6 +447,7 @@ class NodeTemplate(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + accelerators: Optional[pulumi.Input[Sequence[pulumi.Input[Union['NodeTemplateAcceleratorArgs', 'NodeTemplateAcceleratorArgsDict']]]]] = None, cpu_overcommit_type: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -459,6 +500,22 @@ def __init__(__self__, "type": "RESTART_NODE_ON_MINIMAL_SERVERS", }) ``` + ### Node Template Accelerators + + ```python + import pulumi + import pulumi_gcp as gcp + + central1a = gcp.compute.get_node_types(zone="us-central1-a") + template = gcp.compute.NodeTemplate("template", + name="soletenant-with-accelerators", + region="us-central1", + node_type="n1-node-96-624", + accelerators=[{ + "accelerator_type": "nvidia-tesla-t4", + "accelerator_count": 4, + }]) + ``` ## Import @@ -492,6 +549,9 @@ def __init__(__self__, :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[Sequence[pulumi.Input[Union['NodeTemplateAcceleratorArgs', 'NodeTemplateAcceleratorArgsDict']]]] accelerators: List of the type and count of accelerator cards attached to the + node template + Structure is documented below. :param pulumi.Input[str] cpu_overcommit_type: CPU overcommit. Default value is `NONE`. Possible values are: `ENABLED`, `NONE`. 
@@ -562,6 +622,22 @@ def __init__(__self__, "type": "RESTART_NODE_ON_MINIMAL_SERVERS", }) ``` + ### Node Template Accelerators + + ```python + import pulumi + import pulumi_gcp as gcp + + central1a = gcp.compute.get_node_types(zone="us-central1-a") + template = gcp.compute.NodeTemplate("template", + name="soletenant-with-accelerators", + region="us-central1", + node_type="n1-node-96-624", + accelerators=[{ + "accelerator_type": "nvidia-tesla-t4", + "accelerator_count": 4, + }]) + ``` ## Import @@ -608,6 +684,7 @@ def __init__(__self__, resource_name: str, *args, **kwargs): def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, + accelerators: Optional[pulumi.Input[Sequence[pulumi.Input[Union['NodeTemplateAcceleratorArgs', 'NodeTemplateAcceleratorArgsDict']]]]] = None, cpu_overcommit_type: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, @@ -626,6 +703,7 @@ def _internal_init(__self__, raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = NodeTemplateArgs.__new__(NodeTemplateArgs) + __props__.__dict__["accelerators"] = accelerators __props__.__dict__["cpu_overcommit_type"] = cpu_overcommit_type __props__.__dict__["description"] = description __props__.__dict__["name"] = name @@ -647,6 +725,7 @@ def _internal_init(__self__, def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, + accelerators: Optional[pulumi.Input[Sequence[pulumi.Input[Union['NodeTemplateAcceleratorArgs', 'NodeTemplateAcceleratorArgsDict']]]]] = None, cpu_overcommit_type: Optional[pulumi.Input[str]] = None, creation_timestamp: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, @@ -665,6 +744,9 @@ def get(resource_name: str, :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[Sequence[pulumi.Input[Union['NodeTemplateAcceleratorArgs', 'NodeTemplateAcceleratorArgsDict']]]] accelerators: List of the type and count of accelerator cards attached to the + node template + Structure is documented below. :param pulumi.Input[str] cpu_overcommit_type: CPU overcommit. Default value is `NONE`. Possible values are: `ENABLED`, `NONE`. @@ -693,6 +775,7 @@ def get(resource_name: str, __props__ = _NodeTemplateState.__new__(_NodeTemplateState) + __props__.__dict__["accelerators"] = accelerators __props__.__dict__["cpu_overcommit_type"] = cpu_overcommit_type __props__.__dict__["creation_timestamp"] = creation_timestamp __props__.__dict__["description"] = description @@ -706,6 +789,16 @@ def get(resource_name: str, __props__.__dict__["server_binding"] = server_binding return NodeTemplate(resource_name, opts=opts, __props__=__props__) + @property + @pulumi.getter + def accelerators(self) -> pulumi.Output[Optional[Sequence['outputs.NodeTemplateAccelerator']]]: + """ + List of the type and count of accelerator cards attached to the + node template + Structure is documented below. 
+ """ + return pulumi.get(self, "accelerators") + @property @pulumi.getter(name="cpuOvercommitType") def cpu_overcommit_type(self) -> pulumi.Output[Optional[str]]: diff --git a/sdk/python/pulumi_gcp/compute/outputs.py b/sdk/python/pulumi_gcp/compute/outputs.py index 9fe2619ae2..dd5aff7542 100644 --- a/sdk/python/pulumi_gcp/compute/outputs.py +++ b/sdk/python/pulumi_gcp/compute/outputs.py @@ -212,6 +212,7 @@ 'NodeGroupMaintenanceWindow', 'NodeGroupShareSettings', 'NodeGroupShareSettingsProjectMap', + 'NodeTemplateAccelerator', 'NodeTemplateNodeTypeFlexibility', 'NodeTemplateServerBinding', 'OrganizationSecurityPolicyRuleMatch', @@ -4290,7 +4291,7 @@ def __init__(__self__, *, is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ pulumi.set(__self__, "protocol", protocol) @@ -4316,7 +4317,7 @@ def ports(self) -> Optional[Sequence[str]]: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ return pulumi.get(self, "ports") @@ -4336,7 +4337,7 @@ def __init__(__self__, *, is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ pulumi.set(__self__, "protocol", protocol) @@ -4362,7 +4363,7 @@ def ports(self) -> Optional[Sequence[str]]: is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. - Example inputs include: ["22"], ["80","443"], and + Example inputs include: [22], [80, 443], and ["12345-12349"]. """ return pulumi.get(self, "ports") @@ -6218,6 +6219,7 @@ def __init__(__self__, *, disk_encryption_key_raw: Optional[str] = None, disk_encryption_key_sha256: Optional[str] = None, initialize_params: Optional['outputs.InstanceBootDiskInitializeParams'] = None, + interface: Optional[str] = None, kms_key_self_link: Optional[str] = None, mode: Optional[str] = None, source: Optional[str] = None): @@ -6237,6 +6239,7 @@ def __init__(__self__, *, :param 'InstanceBootDiskInitializeParamsArgs' initialize_params: Parameters for a new disk that will be created alongside the new instance. Either `initialize_params` or `source` must be set. Structure is documented below. + :param str interface: The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) :param str kms_key_self_link: The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of `kms_key_self_link` and `disk_encryption_key_raw` may be set. 
@@ -6256,6 +6259,8 @@ def __init__(__self__, *, pulumi.set(__self__, "disk_encryption_key_sha256", disk_encryption_key_sha256) if initialize_params is not None: pulumi.set(__self__, "initialize_params", initialize_params) + if interface is not None: + pulumi.set(__self__, "interface", interface) if kms_key_self_link is not None: pulumi.set(__self__, "kms_key_self_link", kms_key_self_link) if mode is not None: @@ -6313,6 +6318,14 @@ def initialize_params(self) -> Optional['outputs.InstanceBootDiskInitializeParam """ return pulumi.get(self, "initialize_params") + @property + @pulumi.getter + def interface(self) -> Optional[str]: + """ + The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + """ + return pulumi.get(self, "interface") + @property @pulumi.getter(name="kmsKeySelfLink") def kms_key_self_link(self) -> Optional[str]: @@ -6785,6 +6798,7 @@ def __init__(__self__, *, disk_encryption_key_raw: Optional[str] = None, disk_encryption_key_sha256: Optional[str] = None, initialize_params: Optional['outputs.InstanceFromMachineImageBootDiskInitializeParams'] = None, + interface: Optional[str] = None, kms_key_self_link: Optional[str] = None, mode: Optional[str] = None, source: Optional[str] = None): @@ -6794,6 +6808,7 @@ def __init__(__self__, *, :param str disk_encryption_key_raw: A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. :param str disk_encryption_key_sha256: The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. :param 'InstanceFromMachineImageBootDiskInitializeParamsArgs' initialize_params: Parameters with which a disk was created alongside the instance. + :param str interface: The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) :param str kms_key_self_link: The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. :param str mode: Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". :param str source: The name or self_link of the disk attached to this instance. @@ -6808,6 +6823,8 @@ def __init__(__self__, *, pulumi.set(__self__, "disk_encryption_key_sha256", disk_encryption_key_sha256) if initialize_params is not None: pulumi.set(__self__, "initialize_params", initialize_params) + if interface is not None: + pulumi.set(__self__, "interface", interface) if kms_key_self_link is not None: pulumi.set(__self__, "kms_key_self_link", kms_key_self_link) if mode is not None: @@ -6855,6 +6872,14 @@ def initialize_params(self) -> Optional['outputs.InstanceFromMachineImageBootDis """ return pulumi.get(self, "initialize_params") + @property + @pulumi.getter + def interface(self) -> Optional[str]: + """ + The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) 
+ """ + return pulumi.get(self, "interface") + @property @pulumi.getter(name="kmsKeySelfLink") def kms_key_self_link(self) -> Optional[str]: @@ -8393,6 +8418,7 @@ def __init__(__self__, *, disk_encryption_key_raw: Optional[str] = None, disk_encryption_key_sha256: Optional[str] = None, initialize_params: Optional['outputs.InstanceFromTemplateBootDiskInitializeParams'] = None, + interface: Optional[str] = None, kms_key_self_link: Optional[str] = None, mode: Optional[str] = None, source: Optional[str] = None): @@ -8402,6 +8428,7 @@ def __init__(__self__, *, :param str disk_encryption_key_raw: A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. :param str disk_encryption_key_sha256: The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource. :param 'InstanceFromTemplateBootDiskInitializeParamsArgs' initialize_params: Parameters with which a disk was created alongside the instance. + :param str interface: The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) :param str kms_key_self_link: The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. :param str mode: Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE". :param str source: The name or self_link of the disk attached to this instance. @@ -8416,6 +8443,8 @@ def __init__(__self__, *, pulumi.set(__self__, "disk_encryption_key_sha256", disk_encryption_key_sha256) if initialize_params is not None: pulumi.set(__self__, "initialize_params", initialize_params) + if interface is not None: + pulumi.set(__self__, "interface", interface) if kms_key_self_link is not None: pulumi.set(__self__, "kms_key_self_link", kms_key_self_link) if mode is not None: @@ -8463,6 +8492,14 @@ def initialize_params(self) -> Optional['outputs.InstanceFromTemplateBootDiskIni """ return pulumi.get(self, "initialize_params") + @property + @pulumi.getter + def interface(self) -> Optional[str]: + """ + The disk interface used for attaching this disk. One of SCSI or NVME. (This field is shared with attached_disk and only used for specific cases, please don't specify this field without advice from Google.) + """ + return pulumi.get(self, "interface") + @property @pulumi.getter(name="kmsKeySelfLink") def kms_key_self_link(self) -> Optional[str]: @@ -10950,7 +10987,7 @@ def __init__(__self__, *, network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. :param str subnetwork_project: The project in which the subnetwork belongs. - If the `subnetwork` is a self_link, this field is ignored in favor of the project + If the `subnetwork` is a self_link, this field is set to the project defined in the subnetwork self_link. If the `subnetwork` is a name and this field is not provided, the provider project is used. """ @@ -11127,7 +11164,7 @@ def subnetwork(self) -> Optional[str]: def subnetwork_project(self) -> Optional[str]: """ The project in which the subnetwork belongs. - If the `subnetwork` is a self_link, this field is ignored in favor of the project + If the `subnetwork` is a self_link, this field is set to the project defined in the subnetwork self_link. 
If the `subnetwork` is a name and this field is not provided, the provider project is used. """ @@ -15082,6 +15119,60 @@ def project_id(self) -> str: return pulumi.get(self, "project_id") +@pulumi.output_type +class NodeTemplateAccelerator(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "acceleratorCount": + suggest = "accelerator_count" + elif key == "acceleratorType": + suggest = "accelerator_type" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in NodeTemplateAccelerator. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + NodeTemplateAccelerator.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + NodeTemplateAccelerator.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + accelerator_count: Optional[int] = None, + accelerator_type: Optional[str] = None): + """ + :param int accelerator_count: The number of the guest accelerator cards exposed to this + node template. + :param str accelerator_type: Full or partial URL of the accelerator type resource to expose + to this node template. + """ + if accelerator_count is not None: + pulumi.set(__self__, "accelerator_count", accelerator_count) + if accelerator_type is not None: + pulumi.set(__self__, "accelerator_type", accelerator_type) + + @property + @pulumi.getter(name="acceleratorCount") + def accelerator_count(self) -> Optional[int]: + """ + The number of the guest accelerator cards exposed to this + node template. + """ + return pulumi.get(self, "accelerator_count") + + @property + @pulumi.getter(name="acceleratorType") + def accelerator_type(self) -> Optional[str]: + """ + Full or partial URL of the accelerator type resource to expose + to this node template. + """ + return pulumi.get(self, "accelerator_type") + + @pulumi.output_type class NodeTemplateNodeTypeFlexibility(dict): @staticmethod @@ -45910,6 +46001,7 @@ def __init__(__self__, *, disk_encryption_key_raw: str, disk_encryption_key_sha256: str, initialize_params: Sequence['outputs.GetInstanceBootDiskInitializeParamResult'], + interface: str, kms_key_self_link: str, mode: str, source: str): @@ -45923,6 +46015,7 @@ def __init__(__self__, *, () that protects this resource. :param Sequence['GetInstanceBootDiskInitializeParamArgs'] initialize_params: Parameters with which a disk was created alongside the instance. Structure is documented below. + :param str interface: The disk interface used for attaching this disk. One of `SCSI` or `NVME`. :param str kms_key_self_link: The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set. :param str mode: Read/write mode for the disk. One of `"READ_ONLY"` or `"READ_WRITE"`. :param str source: The name or self_link of the disk attached to this instance. 
@@ -45932,6 +46025,7 @@ def __init__(__self__, *, pulumi.set(__self__, "disk_encryption_key_raw", disk_encryption_key_raw) pulumi.set(__self__, "disk_encryption_key_sha256", disk_encryption_key_sha256) pulumi.set(__self__, "initialize_params", initialize_params) + pulumi.set(__self__, "interface", interface) pulumi.set(__self__, "kms_key_self_link", kms_key_self_link) pulumi.set(__self__, "mode", mode) pulumi.set(__self__, "source", source) @@ -45980,6 +46074,14 @@ def initialize_params(self) -> Sequence['outputs.GetInstanceBootDiskInitializePa """ return pulumi.get(self, "initialize_params") + @property + @pulumi.getter + def interface(self) -> str: + """ + The disk interface used for attaching this disk. One of `SCSI` or `NVME`. + """ + return pulumi.get(self, "interface") + @property @pulumi.getter(name="kmsKeySelfLink") def kms_key_self_link(self) -> str: diff --git a/sdk/python/pulumi_gcp/compute/target_https_proxy.py b/sdk/python/pulumi_gcp/compute/target_https_proxy.py index 615451edd8..158f6ae549 100644 --- a/sdk/python/pulumi_gcp/compute/target_https_proxy.py +++ b/sdk/python/pulumi_gcp/compute/target_https_proxy.py @@ -81,6 +81,10 @@ def __init__(__self__, *, INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. :param pulumi.Input[Sequence[pulumi.Input[str]]] ssl_certificates: URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. sslCertificates and certificateManagerCertificates can not be defined together. @@ -269,6 +273,10 @@ def server_tls_policy(self) -> Optional[pulumi.Input[str]]: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. """ return pulumi.get(self, "server_tls_policy") @@ -387,6 +395,10 @@ def __init__(__self__, *, INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. :param pulumi.Input[Sequence[pulumi.Input[str]]] ssl_certificates: URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. sslCertificates and certificateManagerCertificates can not be defined together. 
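The new boot disk `interface` attribute is largely informational: it reports whether the disk is attached over SCSI or NVMe, and the resource-side field is documented as something to leave unset unless advised by Google. A small sketch of reading it through the instance data source, assuming a hypothetical instance name and zone:

```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical existing instance; adjust the name, zone, and project as needed.
existing = gcp.compute.get_instance(name="my-instance", zone="us-central1-a")

# Each boot disk entry now carries the attachment interface (SCSI or NVME).
pulumi.export("boot_disk_interface", existing.boot_disks[0].interface)
```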
@@ -607,6 +619,10 @@ def server_tls_policy(self) -> Optional[pulumi.Input[str]]: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. """ return pulumi.get(self, "server_tls_policy") @@ -973,6 +989,10 @@ def __init__(__self__, INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. :param pulumi.Input[Sequence[pulumi.Input[str]]] ssl_certificates: URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. sslCertificates and certificateManagerCertificates can not be defined together. @@ -1364,6 +1384,10 @@ def get(resource_name: str, INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. :param pulumi.Input[Sequence[pulumi.Input[str]]] ssl_certificates: URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. sslCertificates and certificateManagerCertificates can not be defined together. @@ -1529,6 +1553,10 @@ def server_tls_policy(self) -> pulumi.Output[Optional[str]]: INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted. + If you remove this field from your configuration at the same time as + deleting or recreating a referenced ServerTlsPolicy resource, you will + receive a resourceInUseByAnotherResource error. Use lifecycle.create_before_destroy + within the ServerTlsPolicy resource to avoid this. 
""" return pulumi.get(self, "server_tls_policy") diff --git a/sdk/python/pulumi_gcp/container/_inputs.py b/sdk/python/pulumi_gcp/container/_inputs.py index c31d167a47..8351310b22 100644 --- a/sdk/python/pulumi_gcp/container/_inputs.py +++ b/sdk/python/pulumi_gcp/container/_inputs.py @@ -323,6 +323,8 @@ 'ClusterNodePoolAutoConfigArgsDict', 'ClusterNodePoolAutoConfigNetworkTagsArgs', 'ClusterNodePoolAutoConfigNetworkTagsArgsDict', + 'ClusterNodePoolAutoConfigNodeKubeletConfigArgs', + 'ClusterNodePoolAutoConfigNodeKubeletConfigArgsDict', 'ClusterNodePoolAutoscalingArgs', 'ClusterNodePoolAutoscalingArgsDict', 'ClusterNodePoolDefaultsArgs', @@ -9735,6 +9737,10 @@ class ClusterNodeConfigKubeletConfigArgsDict(TypedDict): not specifying the `kubelet_config` block should be the equivalent of specifying `none`. """ + insecure_kubelet_readonly_port_enabled: NotRequired[pulumi.Input[str]] + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ pod_pids_limit: NotRequired[pulumi.Input[int]] """ Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. @@ -9748,6 +9754,7 @@ def __init__(__self__, *, cpu_manager_policy: pulumi.Input[str], cpu_cfs_quota: Optional[pulumi.Input[bool]] = None, cpu_cfs_quota_period: Optional[pulumi.Input[str]] = None, + insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[str]] = None, pod_pids_limit: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] cpu_manager_policy: The CPU management policy on the node. See @@ -9764,6 +9771,7 @@ def __init__(__self__, *, value and accepts an invalid `default` value instead. While this remains true, not specifying the `kubelet_config` block should be the equivalent of specifying `none`. + :param pulumi.Input[str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param pulumi.Input[int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. """ pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy) @@ -9771,6 +9779,8 @@ def __init__(__self__, *, pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota) if cpu_cfs_quota_period is not None: pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period) + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) if pod_pids_limit is not None: pulumi.set(__self__, "pod_pids_limit", pod_pids_limit) @@ -9821,6 +9831,18 @@ def cpu_cfs_quota_period(self) -> Optional[pulumi.Input[str]]: def cpu_cfs_quota_period(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cpu_cfs_quota_period", value) + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[pulumi.Input[str]]: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + + @insecure_kubelet_readonly_port_enabled.setter + def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value) + @property @pulumi.getter(name="podPidsLimit") def pod_pids_limit(self) -> Optional[pulumi.Input[int]]: @@ -10795,7 +10817,12 @@ def version(self, value: Optional[pulumi.Input[str]]): class ClusterNodePoolAutoConfigArgsDict(TypedDict): network_tags: NotRequired[pulumi.Input['ClusterNodePoolAutoConfigNetworkTagsArgsDict']] """ - The network tag config for the cluster's automatically provisioned node pools. + The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. + """ + node_kubelet_config: NotRequired[pulumi.Input['ClusterNodePoolAutoConfigNodeKubeletConfigArgsDict']] + """ + Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + Structure is documented below. """ resource_manager_tags: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[str]]]] """ @@ -10808,13 +10835,18 @@ class ClusterNodePoolAutoConfigArgsDict(TypedDict): class ClusterNodePoolAutoConfigArgs: def __init__(__self__, *, network_tags: Optional[pulumi.Input['ClusterNodePoolAutoConfigNetworkTagsArgs']] = None, + node_kubelet_config: Optional[pulumi.Input['ClusterNodePoolAutoConfigNodeKubeletConfigArgs']] = None, resource_manager_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ - :param pulumi.Input['ClusterNodePoolAutoConfigNetworkTagsArgs'] network_tags: The network tag config for the cluster's automatically provisioned node pools. + :param pulumi.Input['ClusterNodePoolAutoConfigNetworkTagsArgs'] network_tags: The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. + :param pulumi.Input['ClusterNodePoolAutoConfigNodeKubeletConfigArgs'] node_kubelet_config: Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + Structure is documented below. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_manager_tags: A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. """ if network_tags is not None: pulumi.set(__self__, "network_tags", network_tags) + if node_kubelet_config is not None: + pulumi.set(__self__, "node_kubelet_config", node_kubelet_config) if resource_manager_tags is not None: pulumi.set(__self__, "resource_manager_tags", resource_manager_tags) @@ -10822,7 +10854,7 @@ def __init__(__self__, *, @pulumi.getter(name="networkTags") def network_tags(self) -> Optional[pulumi.Input['ClusterNodePoolAutoConfigNetworkTagsArgs']]: """ - The network tag config for the cluster's automatically provisioned node pools. + The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. 
""" return pulumi.get(self, "network_tags") @@ -10830,6 +10862,19 @@ def network_tags(self) -> Optional[pulumi.Input['ClusterNodePoolAutoConfigNetwor def network_tags(self, value: Optional[pulumi.Input['ClusterNodePoolAutoConfigNetworkTagsArgs']]): pulumi.set(self, "network_tags", value) + @property + @pulumi.getter(name="nodeKubeletConfig") + def node_kubelet_config(self) -> Optional[pulumi.Input['ClusterNodePoolAutoConfigNodeKubeletConfigArgs']]: + """ + Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + Structure is documented below. + """ + return pulumi.get(self, "node_kubelet_config") + + @node_kubelet_config.setter + def node_kubelet_config(self, value: Optional[pulumi.Input['ClusterNodePoolAutoConfigNodeKubeletConfigArgs']]): + pulumi.set(self, "node_kubelet_config", value) + @property @pulumi.getter(name="resourceManagerTags") def resource_manager_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: @@ -10875,6 +10920,38 @@ def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) +if not MYPY: + class ClusterNodePoolAutoConfigNodeKubeletConfigArgsDict(TypedDict): + insecure_kubelet_readonly_port_enabled: NotRequired[pulumi.Input[str]] + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ +elif False: + ClusterNodePoolAutoConfigNodeKubeletConfigArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class ClusterNodePoolAutoConfigNodeKubeletConfigArgs: + def __init__(__self__, *, + insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) + + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[pulumi.Input[str]]: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + + @insecure_kubelet_readonly_port_enabled.setter + def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value) + + if not MYPY: class ClusterNodePoolAutoscalingArgsDict(TypedDict): location_policy: NotRequired[pulumi.Input[str]] @@ -11029,6 +11106,10 @@ class ClusterNodePoolDefaultsNodeConfigDefaultsArgsDict(TypedDict): """ The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below. """ + insecure_kubelet_readonly_port_enabled: NotRequired[pulumi.Input[str]] + """ + Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ """ logging_variant: NotRequired[pulumi.Input[str]] """ The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. @@ -11041,16 +11122,20 @@ class ClusterNodePoolDefaultsNodeConfigDefaultsArgs: def __init__(__self__, *, containerd_config: Optional[pulumi.Input['ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigArgs']] = None, gcfs_config: Optional[pulumi.Input['ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArgs']] = None, + insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[str]] = None, logging_variant: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input['ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigArgs'] containerd_config: Parameters for containerd configuration. :param pulumi.Input['ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArgs'] gcfs_config: The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below. + :param pulumi.Input[str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param pulumi.Input[str] logging_variant: The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. """ if containerd_config is not None: pulumi.set(__self__, "containerd_config", containerd_config) if gcfs_config is not None: pulumi.set(__self__, "gcfs_config", gcfs_config) + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) if logging_variant is not None: pulumi.set(__self__, "logging_variant", logging_variant) @@ -11078,6 +11163,18 @@ def gcfs_config(self) -> Optional[pulumi.Input['ClusterNodePoolDefaultsNodeConfi def gcfs_config(self, value: Optional[pulumi.Input['ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArgs']]): pulumi.set(self, "gcfs_config", value) + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[pulumi.Input[str]]: + """ + Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + + @insecure_kubelet_readonly_port_enabled.setter + def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value) + @property @pulumi.getter(name="loggingVariant") def logging_variant(self) -> Optional[pulumi.Input[str]]: @@ -13453,6 +13550,10 @@ class ClusterNodePoolNodeConfigKubeletConfigArgsDict(TypedDict): not specifying the `kubelet_config` block should be the equivalent of specifying `none`. 
""" + insecure_kubelet_readonly_port_enabled: NotRequired[pulumi.Input[str]] + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ pod_pids_limit: NotRequired[pulumi.Input[int]] """ Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. @@ -13466,6 +13567,7 @@ def __init__(__self__, *, cpu_manager_policy: pulumi.Input[str], cpu_cfs_quota: Optional[pulumi.Input[bool]] = None, cpu_cfs_quota_period: Optional[pulumi.Input[str]] = None, + insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[str]] = None, pod_pids_limit: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] cpu_manager_policy: The CPU management policy on the node. See @@ -13482,6 +13584,7 @@ def __init__(__self__, *, value and accepts an invalid `default` value instead. While this remains true, not specifying the `kubelet_config` block should be the equivalent of specifying `none`. + :param pulumi.Input[str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param pulumi.Input[int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. """ pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy) @@ -13489,6 +13592,8 @@ def __init__(__self__, *, pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota) if cpu_cfs_quota_period is not None: pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period) + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) if pod_pids_limit is not None: pulumi.set(__self__, "pod_pids_limit", pod_pids_limit) @@ -13539,6 +13644,18 @@ def cpu_cfs_quota_period(self) -> Optional[pulumi.Input[str]]: def cpu_cfs_quota_period(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cpu_cfs_quota_period", value) + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[pulumi.Input[str]]: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + + @insecure_kubelet_readonly_port_enabled.setter + def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value) + @property @pulumi.getter(name="podPidsLimit") def pod_pids_limit(self) -> Optional[pulumi.Input[int]]: @@ -17484,6 +17601,10 @@ class NodePoolNodeConfigKubeletConfigArgsDict(TypedDict): """ Set the CPU CFS quota period value 'cpu.cfs_period_us'. """ + insecure_kubelet_readonly_port_enabled: NotRequired[pulumi.Input[str]] + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ pod_pids_limit: NotRequired[pulumi.Input[int]] """ Controls the maximum number of processes allowed to run in a pod. 
@@ -17497,11 +17618,13 @@ def __init__(__self__, *, cpu_manager_policy: pulumi.Input[str], cpu_cfs_quota: Optional[pulumi.Input[bool]] = None, cpu_cfs_quota_period: Optional[pulumi.Input[str]] = None, + insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[str]] = None, pod_pids_limit: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] cpu_manager_policy: Control the CPU management policy on the node. :param pulumi.Input[bool] cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits. :param pulumi.Input[str] cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'. + :param pulumi.Input[str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param pulumi.Input[int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. """ pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy) @@ -17509,6 +17632,8 @@ def __init__(__self__, *, pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota) if cpu_cfs_quota_period is not None: pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period) + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) if pod_pids_limit is not None: pulumi.set(__self__, "pod_pids_limit", pod_pids_limit) @@ -17548,6 +17673,18 @@ def cpu_cfs_quota_period(self) -> Optional[pulumi.Input[str]]: def cpu_cfs_quota_period(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cpu_cfs_quota_period", value) + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[pulumi.Input[str]]: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + + @insecure_kubelet_readonly_port_enabled.setter + def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value) + @property @pulumi.getter(name="podPidsLimit") def pod_pids_limit(self) -> Optional[pulumi.Input[int]]: diff --git a/sdk/python/pulumi_gcp/container/attached_cluster.py b/sdk/python/pulumi_gcp/container/attached_cluster.py index 497907ebc5..15afd59c7c 100644 --- a/sdk/python/pulumi_gcp/container/attached_cluster.py +++ b/sdk/python/pulumi_gcp/container/attached_cluster.py @@ -63,7 +63,7 @@ def __init__(__self__, *, 'effective_annotations' for all of the annotations present on the resource. :param pulumi.Input['AttachedClusterAuthorizationArgs'] authorization: Configuration related to the cluster RBAC settings. :param pulumi.Input['AttachedClusterBinaryAuthorizationArgs'] binary_authorization: Binary Authorization configuration. - :param pulumi.Input[str] deletion_policy: Policy to determine what flags to send on delete. + :param pulumi.Input[str] deletion_policy: Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS :param pulumi.Input[str] description: A human readable description of this attached cluster. Cannot be longer than 255 UTF-8 encoded bytes. :param pulumi.Input['AttachedClusterLoggingConfigArgs'] logging_config: Logging configuration. 
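For Standard node pools the same toggle lives under `node_config.kubelet_config`. A sketch against a hypothetical pre-existing cluster; note that `cpu_manager_policy` remains a required key in this schema version, so it is set explicitly:

```python
import pulumi
import pulumi_gcp as gcp

pool = gcp.container.NodePool("pool",
    name="example-pool",
    cluster="example-cluster",  # hypothetical existing cluster in the same location
    location="us-central1",
    node_count=1,
    node_config={
        "machine_type": "e2-medium",
        "kubelet_config": {
            "cpu_manager_policy": "none",                      # required by this schema version
            "insecure_kubelet_readonly_port_enabled": "FALSE",  # close the kubelet read-only port
        },
    })
```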
:param pulumi.Input['AttachedClusterMonitoringConfigArgs'] monitoring_config: Monitoring configuration. @@ -213,7 +213,7 @@ def binary_authorization(self, value: Optional[pulumi.Input['AttachedClusterBina @pulumi.getter(name="deletionPolicy") def deletion_policy(self) -> Optional[pulumi.Input[str]]: """ - Policy to determine what flags to send on delete. + Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS """ return pulumi.get(self, "deletion_policy") @@ -333,7 +333,7 @@ def __init__(__self__, *, For EKS clusters, this is an AWS region. For AKS clusters, this is an Azure region. :param pulumi.Input[str] create_time: Output only. The time at which this cluster was created. - :param pulumi.Input[str] deletion_policy: Policy to determine what flags to send on delete. + :param pulumi.Input[str] deletion_policy: Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS :param pulumi.Input[str] description: A human readable description of this attached cluster. Cannot be longer than 255 UTF-8 encoded bytes. :param pulumi.Input[str] distribution: The Kubernetes distribution of the underlying attached cluster. Supported values: "eks", "aks", "generic". The generic distribution provides the ability to register @@ -490,7 +490,7 @@ def create_time(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="deletionPolicy") def deletion_policy(self) -> Optional[pulumi.Input[str]]: """ - Policy to determine what flags to send on delete. + Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS """ return pulumi.get(self, "deletion_policy") @@ -912,7 +912,7 @@ def __init__(__self__, 'effective_annotations' for all of the annotations present on the resource. :param pulumi.Input[Union['AttachedClusterAuthorizationArgs', 'AttachedClusterAuthorizationArgsDict']] authorization: Configuration related to the cluster RBAC settings. :param pulumi.Input[Union['AttachedClusterBinaryAuthorizationArgs', 'AttachedClusterBinaryAuthorizationArgsDict']] binary_authorization: Binary Authorization configuration. - :param pulumi.Input[str] deletion_policy: Policy to determine what flags to send on delete. + :param pulumi.Input[str] deletion_policy: Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS :param pulumi.Input[str] description: A human readable description of this attached cluster. Cannot be longer than 255 UTF-8 encoded bytes. :param pulumi.Input[str] distribution: The Kubernetes distribution of the underlying attached cluster. Supported values: "eks", "aks", "generic". The generic distribution provides the ability to register @@ -1214,7 +1214,7 @@ def get(resource_name: str, For EKS clusters, this is an AWS region. For AKS clusters, this is an Azure region. :param pulumi.Input[str] create_time: Output only. The time at which this cluster was created. - :param pulumi.Input[str] deletion_policy: Policy to determine what flags to send on delete. + :param pulumi.Input[str] deletion_policy: Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS :param pulumi.Input[str] description: A human readable description of this attached cluster. Cannot be longer than 255 UTF-8 encoded bytes. :param pulumi.Input[str] distribution: The Kubernetes distribution of the underlying attached cluster. Supported values: "eks", "aks", "generic". 
The generic distribution provides the ability to register @@ -1331,7 +1331,7 @@ def create_time(self) -> pulumi.Output[str]: @pulumi.getter(name="deletionPolicy") def deletion_policy(self) -> pulumi.Output[Optional[str]]: """ - Policy to determine what flags to send on delete. + Policy to determine what flags to send on delete. Possible values: DELETE, DELETE_IGNORE_ERRORS """ return pulumi.get(self, "deletion_policy") diff --git a/sdk/python/pulumi_gcp/container/outputs.py b/sdk/python/pulumi_gcp/container/outputs.py index 7dcd876573..4711edb821 100644 --- a/sdk/python/pulumi_gcp/container/outputs.py +++ b/sdk/python/pulumi_gcp/container/outputs.py @@ -170,6 +170,7 @@ 'ClusterNodePool', 'ClusterNodePoolAutoConfig', 'ClusterNodePoolAutoConfigNetworkTags', + 'ClusterNodePoolAutoConfigNodeKubeletConfig', 'ClusterNodePoolAutoscaling', 'ClusterNodePoolDefaults', 'ClusterNodePoolDefaultsNodeConfigDefaults', @@ -362,6 +363,7 @@ 'GetClusterNodePoolResult', 'GetClusterNodePoolAutoConfigResult', 'GetClusterNodePoolAutoConfigNetworkTagResult', + 'GetClusterNodePoolAutoConfigNodeKubeletConfigResult', 'GetClusterNodePoolAutoscalingResult', 'GetClusterNodePoolDefaultResult', 'GetClusterNodePoolDefaultNodeConfigDefaultResult', @@ -7875,6 +7877,8 @@ def __key_warning(key: str): suggest = "cpu_cfs_quota" elif key == "cpuCfsQuotaPeriod": suggest = "cpu_cfs_quota_period" + elif key == "insecureKubeletReadonlyPortEnabled": + suggest = "insecure_kubelet_readonly_port_enabled" elif key == "podPidsLimit": suggest = "pod_pids_limit" @@ -7893,6 +7897,7 @@ def __init__(__self__, *, cpu_manager_policy: str, cpu_cfs_quota: Optional[bool] = None, cpu_cfs_quota_period: Optional[str] = None, + insecure_kubelet_readonly_port_enabled: Optional[str] = None, pod_pids_limit: Optional[int] = None): """ :param str cpu_manager_policy: The CPU management policy on the node. See @@ -7909,6 +7914,7 @@ def __init__(__self__, *, value and accepts an invalid `default` value instead. While this remains true, not specifying the `kubelet_config` block should be the equivalent of specifying `none`. + :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. """ pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy) @@ -7916,6 +7922,8 @@ def __init__(__self__, *, pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota) if cpu_cfs_quota_period is not None: pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period) + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) if pod_pids_limit is not None: pulumi.set(__self__, "pod_pids_limit", pod_pids_limit) @@ -7954,6 +7962,14 @@ def cpu_cfs_quota_period(self) -> Optional[str]: """ return pulumi.get(self, "cpu_cfs_quota_period") + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
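The `deletion_policy` docstring now spells out its accepted values, DELETE and DELETE_IGNORE_ERRORS. A sketch of where the field sits on the resource, with entirely hypothetical EKS details (issuer URL, fleet project number, platform version):

```python
import pulumi
import pulumi_gcp as gcp

attached = gcp.container.AttachedCluster("attached",
    name="example-attached-cluster",
    location="us-west1",
    distribution="eks",
    platform_version="1.30.0-gke.4",  # placeholder; check the versions available in your region
    oidc_config={
        "issuer_url": "https://oidc.eks.us-west-2.amazonaws.com/id/EXAMPLE",  # placeholder issuer
    },
    fleet={
        "project": "projects/123456789012",  # placeholder fleet host project number
    },
    deletion_policy="DELETE_IGNORE_ERRORS")
```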
+ """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + @property @pulumi.getter(name="podPidsLimit") def pod_pids_limit(self) -> Optional[int]: @@ -8707,6 +8723,8 @@ def __key_warning(key: str): suggest = None if key == "networkTags": suggest = "network_tags" + elif key == "nodeKubeletConfig": + suggest = "node_kubelet_config" elif key == "resourceManagerTags": suggest = "resource_manager_tags" @@ -8723,13 +8741,18 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, network_tags: Optional['outputs.ClusterNodePoolAutoConfigNetworkTags'] = None, + node_kubelet_config: Optional['outputs.ClusterNodePoolAutoConfigNodeKubeletConfig'] = None, resource_manager_tags: Optional[Mapping[str, str]] = None): """ - :param 'ClusterNodePoolAutoConfigNetworkTagsArgs' network_tags: The network tag config for the cluster's automatically provisioned node pools. + :param 'ClusterNodePoolAutoConfigNetworkTagsArgs' network_tags: The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. + :param 'ClusterNodePoolAutoConfigNodeKubeletConfigArgs' node_kubelet_config: Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + Structure is documented below. :param Mapping[str, str] resource_manager_tags: A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`. """ if network_tags is not None: pulumi.set(__self__, "network_tags", network_tags) + if node_kubelet_config is not None: + pulumi.set(__self__, "node_kubelet_config", node_kubelet_config) if resource_manager_tags is not None: pulumi.set(__self__, "resource_manager_tags", resource_manager_tags) @@ -8737,10 +8760,19 @@ def __init__(__self__, *, @pulumi.getter(name="networkTags") def network_tags(self) -> Optional['outputs.ClusterNodePoolAutoConfigNetworkTags']: """ - The network tag config for the cluster's automatically provisioned node pools. + The network tag config for the cluster's automatically provisioned node pools. Structure is documented below. """ return pulumi.get(self, "network_tags") + @property + @pulumi.getter(name="nodeKubeletConfig") + def node_kubelet_config(self) -> Optional['outputs.ClusterNodePoolAutoConfigNodeKubeletConfig']: + """ + Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here. + Structure is documented below. 
+ """ + return pulumi.get(self, "node_kubelet_config") + @property @pulumi.getter(name="resourceManagerTags") def resource_manager_tags(self) -> Optional[Mapping[str, str]]: @@ -8769,6 +8801,42 @@ def tags(self) -> Optional[Sequence[str]]: return pulumi.get(self, "tags") +@pulumi.output_type +class ClusterNodePoolAutoConfigNodeKubeletConfig(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "insecureKubeletReadonlyPortEnabled": + suggest = "insecure_kubelet_readonly_port_enabled" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolAutoConfigNodeKubeletConfig. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ClusterNodePoolAutoConfigNodeKubeletConfig.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ClusterNodePoolAutoConfigNodeKubeletConfig.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + insecure_kubelet_readonly_port_enabled: Optional[str] = None): + """ + :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) + + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + + @pulumi.output_type class ClusterNodePoolAutoscaling(dict): @staticmethod @@ -8906,6 +8974,8 @@ def __key_warning(key: str): suggest = "containerd_config" elif key == "gcfsConfig": suggest = "gcfs_config" + elif key == "insecureKubeletReadonlyPortEnabled": + suggest = "insecure_kubelet_readonly_port_enabled" elif key == "loggingVariant": suggest = "logging_variant" @@ -8923,16 +8993,20 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, containerd_config: Optional['outputs.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfig'] = None, gcfs_config: Optional['outputs.ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig'] = None, + insecure_kubelet_readonly_port_enabled: Optional[str] = None, logging_variant: Optional[str] = None): """ :param 'ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigArgs' containerd_config: Parameters for containerd configuration. :param 'ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArgs' gcfs_config: The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below. + :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param str logging_variant: The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. 
See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information. """ if containerd_config is not None: pulumi.set(__self__, "containerd_config", containerd_config) if gcfs_config is not None: pulumi.set(__self__, "gcfs_config", gcfs_config) + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) if logging_variant is not None: pulumi.set(__self__, "logging_variant", logging_variant) @@ -8952,6 +9026,14 @@ def gcfs_config(self) -> Optional['outputs.ClusterNodePoolDefaultsNodeConfigDefa """ return pulumi.get(self, "gcfs_config") + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]: + """ + Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + @property @pulumi.getter(name="loggingVariant") def logging_variant(self) -> Optional[str]: @@ -10804,6 +10886,8 @@ def __key_warning(key: str): suggest = "cpu_cfs_quota" elif key == "cpuCfsQuotaPeriod": suggest = "cpu_cfs_quota_period" + elif key == "insecureKubeletReadonlyPortEnabled": + suggest = "insecure_kubelet_readonly_port_enabled" elif key == "podPidsLimit": suggest = "pod_pids_limit" @@ -10822,6 +10906,7 @@ def __init__(__self__, *, cpu_manager_policy: str, cpu_cfs_quota: Optional[bool] = None, cpu_cfs_quota_period: Optional[str] = None, + insecure_kubelet_readonly_port_enabled: Optional[str] = None, pod_pids_limit: Optional[int] = None): """ :param str cpu_manager_policy: The CPU management policy on the node. See @@ -10838,6 +10923,7 @@ def __init__(__self__, *, value and accepts an invalid `default` value instead. While this remains true, not specifying the `kubelet_config` block should be the equivalent of specifying `none`. + :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304. """ pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy) @@ -10845,6 +10931,8 @@ def __init__(__self__, *, pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota) if cpu_cfs_quota_period is not None: pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period) + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) if pod_pids_limit is not None: pulumi.set(__self__, "pod_pids_limit", pod_pids_limit) @@ -10883,6 +10971,14 @@ def cpu_cfs_quota_period(self) -> Optional[str]: """ return pulumi.get(self, "cpu_cfs_quota_period") + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + @property @pulumi.getter(name="podPidsLimit") def pod_pids_limit(self) -> Optional[int]: @@ -14018,6 +14114,8 @@ def __key_warning(key: str): suggest = "cpu_cfs_quota" elif key == "cpuCfsQuotaPeriod": suggest = "cpu_cfs_quota_period" + elif key == "insecureKubeletReadonlyPortEnabled": + suggest = "insecure_kubelet_readonly_port_enabled" elif key == "podPidsLimit": suggest = "pod_pids_limit" @@ -14036,11 +14134,13 @@ def __init__(__self__, *, cpu_manager_policy: str, cpu_cfs_quota: Optional[bool] = None, cpu_cfs_quota_period: Optional[str] = None, + insecure_kubelet_readonly_port_enabled: Optional[str] = None, pod_pids_limit: Optional[int] = None): """ :param str cpu_manager_policy: Control the CPU management policy on the node. :param bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits. :param str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'. + :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. """ pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy) @@ -14048,6 +14148,8 @@ def __init__(__self__, *, pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota) if cpu_cfs_quota_period is not None: pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period) + if insecure_kubelet_readonly_port_enabled is not None: + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) if pod_pids_limit is not None: pulumi.set(__self__, "pod_pids_limit", pod_pids_limit) @@ -14075,6 +14177,14 @@ def cpu_cfs_quota_period(self) -> Optional[str]: """ return pulumi.get(self, "cpu_cfs_quota_period") + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + @property @pulumi.getter(name="podPidsLimit") def pod_pids_limit(self) -> Optional[int]: @@ -17344,16 +17454,19 @@ def __init__(__self__, *, cpu_cfs_quota: bool, cpu_cfs_quota_period: str, cpu_manager_policy: str, + insecure_kubelet_readonly_port_enabled: str, pod_pids_limit: int): """ :param bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits. :param str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'. :param str cpu_manager_policy: Control the CPU management policy on the node. + :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. 
""" pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota) pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period) pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy) + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) pulumi.set(__self__, "pod_pids_limit", pod_pids_limit) @property @@ -17380,6 +17493,14 @@ def cpu_manager_policy(self) -> str: """ return pulumi.get(self, "cpu_manager_policy") + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> str: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + @property @pulumi.getter(name="podPidsLimit") def pod_pids_limit(self) -> int: @@ -17851,12 +17972,15 @@ def version(self) -> str: class GetClusterNodePoolAutoConfigResult(dict): def __init__(__self__, *, network_tags: Sequence['outputs.GetClusterNodePoolAutoConfigNetworkTagResult'], + node_kubelet_configs: Sequence['outputs.GetClusterNodePoolAutoConfigNodeKubeletConfigResult'], resource_manager_tags: Mapping[str, str]): """ :param Sequence['GetClusterNodePoolAutoConfigNetworkTagArgs'] network_tags: Collection of Compute Engine network tags that can be applied to a node's underlying VM instance. + :param Sequence['GetClusterNodePoolAutoConfigNodeKubeletConfigArgs'] node_kubelet_configs: Node kubelet configs. :param Mapping[str, str] resource_manager_tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty. """ pulumi.set(__self__, "network_tags", network_tags) + pulumi.set(__self__, "node_kubelet_configs", node_kubelet_configs) pulumi.set(__self__, "resource_manager_tags", resource_manager_tags) @property @@ -17867,6 +17991,14 @@ def network_tags(self) -> Sequence['outputs.GetClusterNodePoolAutoConfigNetworkT """ return pulumi.get(self, "network_tags") + @property + @pulumi.getter(name="nodeKubeletConfigs") + def node_kubelet_configs(self) -> Sequence['outputs.GetClusterNodePoolAutoConfigNodeKubeletConfigResult']: + """ + Node kubelet configs. + """ + return pulumi.get(self, "node_kubelet_configs") + @property @pulumi.getter(name="resourceManagerTags") def resource_manager_tags(self) -> Mapping[str, str]: @@ -17894,6 +18026,24 @@ def tags(self) -> Sequence[str]: return pulumi.get(self, "tags") +@pulumi.output_type +class GetClusterNodePoolAutoConfigNodeKubeletConfigResult(dict): + def __init__(__self__, *, + insecure_kubelet_readonly_port_enabled: str): + """ + :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) + + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> str: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. 
+ """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + + @pulumi.output_type class GetClusterNodePoolAutoscalingResult(dict): def __init__(__self__, *, @@ -17979,14 +18129,17 @@ class GetClusterNodePoolDefaultNodeConfigDefaultResult(dict): def __init__(__self__, *, containerd_configs: Sequence['outputs.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigResult'], gcfs_configs: Sequence['outputs.GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfigResult'], + insecure_kubelet_readonly_port_enabled: str, logging_variant: str): """ :param Sequence['GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigArgs'] containerd_configs: Parameters for containerd configuration. :param Sequence['GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfigArgs'] gcfs_configs: GCFS configuration for this node. + :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param str logging_variant: Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. """ pulumi.set(__self__, "containerd_configs", containerd_configs) pulumi.set(__self__, "gcfs_configs", gcfs_configs) + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) pulumi.set(__self__, "logging_variant", logging_variant) @property @@ -18005,6 +18158,14 @@ def gcfs_configs(self) -> Sequence['outputs.GetClusterNodePoolDefaultNodeConfigD """ return pulumi.get(self, "gcfs_configs") + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> str: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + @property @pulumi.getter(name="loggingVariant") def logging_variant(self) -> str: @@ -19200,16 +19361,19 @@ def __init__(__self__, *, cpu_cfs_quota: bool, cpu_cfs_quota_period: str, cpu_manager_policy: str, + insecure_kubelet_readonly_port_enabled: str, pod_pids_limit: int): """ :param bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits. :param str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'. :param str cpu_manager_policy: Control the CPU management policy on the node. + :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`. :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. """ pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota) pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period) pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy) + pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled) pulumi.set(__self__, "pod_pids_limit", pod_pids_limit) @property @@ -19236,6 +19400,14 @@ def cpu_manager_policy(self) -> str: """ return pulumi.get(self, "cpu_manager_policy") + @property + @pulumi.getter(name="insecureKubeletReadonlyPortEnabled") + def insecure_kubelet_readonly_port_enabled(self) -> str: + """ + Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. 
Possible values: `TRUE`, `FALSE`. + """ + return pulumi.get(self, "insecure_kubelet_readonly_port_enabled") + @property @pulumi.getter(name="podPidsLimit") def pod_pids_limit(self) -> int: diff --git a/sdk/python/pulumi_gcp/databasemigrationservice/_inputs.py b/sdk/python/pulumi_gcp/databasemigrationservice/_inputs.py index eb39af5706..ab86cbe84e 100644 --- a/sdk/python/pulumi_gcp/databasemigrationservice/_inputs.py +++ b/sdk/python/pulumi_gcp/databasemigrationservice/_inputs.py @@ -1191,137 +1191,117 @@ def message(self, value: Optional[pulumi.Input[str]]): if not MYPY: class ConnectionProfileMysqlArgsDict(TypedDict): - host: pulumi.Input[str] - """ - Required. The IP or hostname of the source MySQL database. - """ - password: pulumi.Input[str] - """ - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. - This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - **Note**: This property is sensitive and will not be displayed in the plan. + cloud_sql_id: NotRequired[pulumi.Input[str]] """ - port: pulumi.Input[int] + If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. """ - Required. The network port of the source MySQL database. + host: NotRequired[pulumi.Input[str]] """ - username: pulumi.Input[str] + The IP or hostname of the source MySQL database. """ - Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - """ - cloud_sql_id: NotRequired[pulumi.Input[str]] + password: NotRequired[pulumi.Input[str]] """ - If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. + Input only. The password for the user that Database Migration Service will be using to connect to the database. + This field is not returned on request, and the value is encrypted when stored in Database Migration Service. + **Note**: This property is sensitive and will not be displayed in the plan. """ password_set: NotRequired[pulumi.Input[bool]] """ (Output) Output only. Indicates If this connection profile password is stored. """ + port: NotRequired[pulumi.Input[int]] + """ + The network port of the source MySQL database. + """ ssl: NotRequired[pulumi.Input['ConnectionProfileMysqlSslArgsDict']] """ SSL configuration for the destination to connect to the source database. Structure is documented below. """ + username: NotRequired[pulumi.Input[str]] + """ + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + """ elif False: ConnectionProfileMysqlArgsDict: TypeAlias = Mapping[str, Any] @pulumi.input_type class ConnectionProfileMysqlArgs: def __init__(__self__, *, - host: pulumi.Input[str], - password: pulumi.Input[str], - port: pulumi.Input[int], - username: pulumi.Input[str], cloud_sql_id: Optional[pulumi.Input[str]] = None, + host: Optional[pulumi.Input[str]] = None, + password: Optional[pulumi.Input[str]] = None, password_set: Optional[pulumi.Input[bool]] = None, - ssl: Optional[pulumi.Input['ConnectionProfileMysqlSslArgs']] = None): + port: Optional[pulumi.Input[int]] = None, + ssl: Optional[pulumi.Input['ConnectionProfileMysqlSslArgs']] = None, + username: Optional[pulumi.Input[str]] = None): """ - :param pulumi.Input[str] host: Required. The IP or hostname of the source MySQL database. 
- :param pulumi.Input[str] password: Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + :param pulumi.Input[str] cloud_sql_id: If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. + :param pulumi.Input[str] host: The IP or hostname of the source MySQL database. + :param pulumi.Input[str] password: Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service. **Note**: This property is sensitive and will not be displayed in the plan. - :param pulumi.Input[int] port: Required. The network port of the source MySQL database. - :param pulumi.Input[str] username: Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - :param pulumi.Input[str] cloud_sql_id: If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. :param pulumi.Input[bool] password_set: (Output) Output only. Indicates If this connection profile password is stored. + :param pulumi.Input[int] port: The network port of the source MySQL database. :param pulumi.Input['ConnectionProfileMysqlSslArgs'] ssl: SSL configuration for the destination to connect to the source database. Structure is documented below. + :param pulumi.Input[str] username: The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. """ - pulumi.set(__self__, "host", host) - pulumi.set(__self__, "password", password) - pulumi.set(__self__, "port", port) - pulumi.set(__self__, "username", username) if cloud_sql_id is not None: pulumi.set(__self__, "cloud_sql_id", cloud_sql_id) + if host is not None: + pulumi.set(__self__, "host", host) + if password is not None: + pulumi.set(__self__, "password", password) if password_set is not None: pulumi.set(__self__, "password_set", password_set) + if port is not None: + pulumi.set(__self__, "port", port) if ssl is not None: pulumi.set(__self__, "ssl", ssl) + if username is not None: + pulumi.set(__self__, "username", username) + + @property + @pulumi.getter(name="cloudSqlId") + def cloud_sql_id(self) -> Optional[pulumi.Input[str]]: + """ + If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. + """ + return pulumi.get(self, "cloud_sql_id") + + @cloud_sql_id.setter + def cloud_sql_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "cloud_sql_id", value) @property @pulumi.getter - def host(self) -> pulumi.Input[str]: + def host(self) -> Optional[pulumi.Input[str]]: """ - Required. The IP or hostname of the source MySQL database. + The IP or hostname of the source MySQL database. """ return pulumi.get(self, "host") @host.setter - def host(self, value: pulumi.Input[str]): + def host(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host", value) @property @pulumi.getter - def password(self) -> pulumi.Input[str]: + def password(self) -> Optional[pulumi.Input[str]]: """ - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + Input only. The password for the user that Database Migration Service will be using to connect to the database. 
This field is not returned on request, and the value is encrypted when stored in Database Migration Service. **Note**: This property is sensitive and will not be displayed in the plan. """ return pulumi.get(self, "password") @password.setter - def password(self, value: pulumi.Input[str]): + def password(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "password", value) - @property - @pulumi.getter - def port(self) -> pulumi.Input[int]: - """ - Required. The network port of the source MySQL database. - """ - return pulumi.get(self, "port") - - @port.setter - def port(self, value: pulumi.Input[int]): - pulumi.set(self, "port", value) - - @property - @pulumi.getter - def username(self) -> pulumi.Input[str]: - """ - Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - """ - return pulumi.get(self, "username") - - @username.setter - def username(self, value: pulumi.Input[str]): - pulumi.set(self, "username", value) - - @property - @pulumi.getter(name="cloudSqlId") - def cloud_sql_id(self) -> Optional[pulumi.Input[str]]: - """ - If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. - """ - return pulumi.get(self, "cloud_sql_id") - - @cloud_sql_id.setter - def cloud_sql_id(self, value: Optional[pulumi.Input[str]]): - pulumi.set(self, "cloud_sql_id", value) - @property @pulumi.getter(name="passwordSet") def password_set(self) -> Optional[pulumi.Input[bool]]: @@ -1335,6 +1315,18 @@ def password_set(self) -> Optional[pulumi.Input[bool]]: def password_set(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "password_set", value) + @property + @pulumi.getter + def port(self) -> Optional[pulumi.Input[int]]: + """ + The network port of the source MySQL database. + """ + return pulumi.get(self, "port") + + @port.setter + def port(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "port", value) + @property @pulumi.getter def ssl(self) -> Optional[pulumi.Input['ConnectionProfileMysqlSslArgs']]: @@ -1348,6 +1340,18 @@ def ssl(self) -> Optional[pulumi.Input['ConnectionProfileMysqlSslArgs']]: def ssl(self, value: Optional[pulumi.Input['ConnectionProfileMysqlSslArgs']]): pulumi.set(self, "ssl", value) + @property + @pulumi.getter + def username(self) -> Optional[pulumi.Input[str]]: + """ + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + """ + return pulumi.get(self, "username") + + @username.setter + def username(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "username", value) + if not MYPY: class ConnectionProfileMysqlSslArgsDict(TypedDict): @@ -1961,134 +1965,108 @@ def __init__(__self__): if not MYPY: class ConnectionProfilePostgresqlArgsDict(TypedDict): - host: pulumi.Input[str] + alloydb_cluster_id: NotRequired[pulumi.Input[str]] """ - Required. The IP or hostname of the source MySQL database. - """ - password: pulumi.Input[str] - """ - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. - This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - **Note**: This property is sensitive and will not be displayed in the plan. - """ - port: pulumi.Input[int] - """ - Required. The network port of the source MySQL database. - """ - username: pulumi.Input[str] - """ - Required. 
The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. """ cloud_sql_id: NotRequired[pulumi.Input[str]] """ If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. """ + host: NotRequired[pulumi.Input[str]] + """ + The IP or hostname of the source MySQL database. + """ network_architecture: NotRequired[pulumi.Input[str]] """ (Output) Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. """ + password: NotRequired[pulumi.Input[str]] + """ + Input only. The password for the user that Database Migration Service will be using to connect to the database. + This field is not returned on request, and the value is encrypted when stored in Database Migration Service. + **Note**: This property is sensitive and will not be displayed in the plan. + """ password_set: NotRequired[pulumi.Input[bool]] """ (Output) Output only. Indicates If this connection profile password is stored. """ + port: NotRequired[pulumi.Input[int]] + """ + The network port of the source MySQL database. + """ ssl: NotRequired[pulumi.Input['ConnectionProfilePostgresqlSslArgsDict']] """ SSL configuration for the destination to connect to the source database. Structure is documented below. """ + username: NotRequired[pulumi.Input[str]] + """ + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + """ elif False: ConnectionProfilePostgresqlArgsDict: TypeAlias = Mapping[str, Any] @pulumi.input_type class ConnectionProfilePostgresqlArgs: def __init__(__self__, *, - host: pulumi.Input[str], - password: pulumi.Input[str], - port: pulumi.Input[int], - username: pulumi.Input[str], + alloydb_cluster_id: Optional[pulumi.Input[str]] = None, cloud_sql_id: Optional[pulumi.Input[str]] = None, + host: Optional[pulumi.Input[str]] = None, network_architecture: Optional[pulumi.Input[str]] = None, + password: Optional[pulumi.Input[str]] = None, password_set: Optional[pulumi.Input[bool]] = None, - ssl: Optional[pulumi.Input['ConnectionProfilePostgresqlSslArgs']] = None): + port: Optional[pulumi.Input[int]] = None, + ssl: Optional[pulumi.Input['ConnectionProfilePostgresqlSslArgs']] = None, + username: Optional[pulumi.Input[str]] = None): """ - :param pulumi.Input[str] host: Required. The IP or hostname of the source MySQL database. - :param pulumi.Input[str] password: Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. - This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - **Note**: This property is sensitive and will not be displayed in the plan. - :param pulumi.Input[int] port: Required. The network port of the source MySQL database. - :param pulumi.Input[str] username: Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + :param pulumi.Input[str] alloydb_cluster_id: If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. :param pulumi.Input[str] cloud_sql_id: If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. 
+ :param pulumi.Input[str] host: The IP or hostname of the source MySQL database. :param pulumi.Input[str] network_architecture: (Output) Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. + :param pulumi.Input[str] password: Input only. The password for the user that Database Migration Service will be using to connect to the database. + This field is not returned on request, and the value is encrypted when stored in Database Migration Service. + **Note**: This property is sensitive and will not be displayed in the plan. :param pulumi.Input[bool] password_set: (Output) Output only. Indicates If this connection profile password is stored. + :param pulumi.Input[int] port: The network port of the source MySQL database. :param pulumi.Input['ConnectionProfilePostgresqlSslArgs'] ssl: SSL configuration for the destination to connect to the source database. Structure is documented below. + :param pulumi.Input[str] username: The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. """ - pulumi.set(__self__, "host", host) - pulumi.set(__self__, "password", password) - pulumi.set(__self__, "port", port) - pulumi.set(__self__, "username", username) + if alloydb_cluster_id is not None: + pulumi.set(__self__, "alloydb_cluster_id", alloydb_cluster_id) if cloud_sql_id is not None: pulumi.set(__self__, "cloud_sql_id", cloud_sql_id) + if host is not None: + pulumi.set(__self__, "host", host) if network_architecture is not None: pulumi.set(__self__, "network_architecture", network_architecture) + if password is not None: + pulumi.set(__self__, "password", password) if password_set is not None: pulumi.set(__self__, "password_set", password_set) + if port is not None: + pulumi.set(__self__, "port", port) if ssl is not None: pulumi.set(__self__, "ssl", ssl) + if username is not None: + pulumi.set(__self__, "username", username) @property - @pulumi.getter - def host(self) -> pulumi.Input[str]: + @pulumi.getter(name="alloydbClusterId") + def alloydb_cluster_id(self) -> Optional[pulumi.Input[str]]: """ - Required. The IP or hostname of the source MySQL database. + If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. """ - return pulumi.get(self, "host") + return pulumi.get(self, "alloydb_cluster_id") - @host.setter - def host(self, value: pulumi.Input[str]): - pulumi.set(self, "host", value) - - @property - @pulumi.getter - def password(self) -> pulumi.Input[str]: - """ - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. - This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - **Note**: This property is sensitive and will not be displayed in the plan. - """ - return pulumi.get(self, "password") - - @password.setter - def password(self, value: pulumi.Input[str]): - pulumi.set(self, "password", value) - - @property - @pulumi.getter - def port(self) -> pulumi.Input[int]: - """ - Required. The network port of the source MySQL database. - """ - return pulumi.get(self, "port") - - @port.setter - def port(self, value: pulumi.Input[int]): - pulumi.set(self, "port", value) - - @property - @pulumi.getter - def username(self) -> pulumi.Input[str]: - """ - Required. The username that Database Migration Service will use to connect to the database. 
The value is encrypted when stored in Database Migration Service. - """ - return pulumi.get(self, "username") - - @username.setter - def username(self, value: pulumi.Input[str]): - pulumi.set(self, "username", value) + @alloydb_cluster_id.setter + def alloydb_cluster_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "alloydb_cluster_id", value) @property @pulumi.getter(name="cloudSqlId") @@ -2102,6 +2080,18 @@ def cloud_sql_id(self) -> Optional[pulumi.Input[str]]: def cloud_sql_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cloud_sql_id", value) + @property + @pulumi.getter + def host(self) -> Optional[pulumi.Input[str]]: + """ + The IP or hostname of the source MySQL database. + """ + return pulumi.get(self, "host") + + @host.setter + def host(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "host", value) + @property @pulumi.getter(name="networkArchitecture") def network_architecture(self) -> Optional[pulumi.Input[str]]: @@ -2115,6 +2105,20 @@ def network_architecture(self) -> Optional[pulumi.Input[str]]: def network_architecture(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network_architecture", value) + @property + @pulumi.getter + def password(self) -> Optional[pulumi.Input[str]]: + """ + Input only. The password for the user that Database Migration Service will be using to connect to the database. + This field is not returned on request, and the value is encrypted when stored in Database Migration Service. + **Note**: This property is sensitive and will not be displayed in the plan. + """ + return pulumi.get(self, "password") + + @password.setter + def password(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "password", value) + @property @pulumi.getter(name="passwordSet") def password_set(self) -> Optional[pulumi.Input[bool]]: @@ -2128,6 +2132,18 @@ def password_set(self) -> Optional[pulumi.Input[bool]]: def password_set(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "password_set", value) + @property + @pulumi.getter + def port(self) -> Optional[pulumi.Input[int]]: + """ + The network port of the source MySQL database. + """ + return pulumi.get(self, "port") + + @port.setter + def port(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "port", value) + @property @pulumi.getter def ssl(self) -> Optional[pulumi.Input['ConnectionProfilePostgresqlSslArgs']]: @@ -2141,6 +2157,18 @@ def ssl(self) -> Optional[pulumi.Input['ConnectionProfilePostgresqlSslArgs']]: def ssl(self, value: Optional[pulumi.Input['ConnectionProfilePostgresqlSslArgs']]): pulumi.set(self, "ssl", value) + @property + @pulumi.getter + def username(self) -> Optional[pulumi.Input[str]]: + """ + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. 
+ """ + return pulumi.get(self, "username") + + @username.setter + def username(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "username", value) + if not MYPY: class ConnectionProfilePostgresqlSslArgsDict(TypedDict): diff --git a/sdk/python/pulumi_gcp/databasemigrationservice/connection_profile.py b/sdk/python/pulumi_gcp/databasemigrationservice/connection_profile.py index 83ea3bd3a2..65e370a90f 100644 --- a/sdk/python/pulumi_gcp/databasemigrationservice/connection_profile.py +++ b/sdk/python/pulumi_gcp/databasemigrationservice/connection_profile.py @@ -734,6 +734,109 @@ def __init__(__self__, }, opts = pulumi.ResourceOptions(depends_on=[vpc_connection])) ``` + ### Database Migration Service Connection Profile Existing Mysql + + ```python + import pulumi + import pulumi_gcp as gcp + + project = gcp.organizations.get_project() + destination_csql = gcp.sql.DatabaseInstance("destination_csql", + name="destination-csql", + database_version="MYSQL_5_7", + settings={ + "tier": "db-n1-standard-1", + "deletion_protection_enabled": False, + }, + deletion_protection=False) + existing_mysql = gcp.databasemigrationservice.ConnectionProfile("existing-mysql", + location="us-central1", + connection_profile_id="destination-cp", + display_name="destination-cp_display", + labels={ + "foo": "bar", + }, + mysql={ + "cloud_sql_id": "destination-csql", + }, + opts = pulumi.ResourceOptions(depends_on=[destination_csql])) + ``` + ### Database Migration Service Connection Profile Existing Postgres + + ```python + import pulumi + import pulumi_gcp as gcp + + project = gcp.organizations.get_project() + destination_csql = gcp.sql.DatabaseInstance("destination_csql", + name="destination-csql", + database_version="POSTGRES_15", + settings={ + "tier": "db-custom-2-13312", + "deletion_protection_enabled": False, + }, + deletion_protection=False) + existing_psql = gcp.databasemigrationservice.ConnectionProfile("existing-psql", + location="us-central1", + connection_profile_id="destination-cp", + display_name="destination-cp_display", + labels={ + "foo": "bar", + }, + postgresql={ + "cloud_sql_id": "destination-csql", + }, + opts = pulumi.ResourceOptions(depends_on=[destination_csql])) + ``` + ### Database Migration Service Connection Profile Existing Alloydb + + ```python + import pulumi + import pulumi_gcp as gcp + + project = gcp.organizations.get_project() + default = gcp.compute.Network("default", name="destination-alloydb") + destination_alloydb = gcp.alloydb.Cluster("destination_alloydb", + cluster_id="destination-alloydb", + location="us-central1", + network_config={ + "network": default.id, + }, + database_version="POSTGRES_15", + initial_user={ + "user": "destination-alloydb", + "password": "destination-alloydb", + }) + private_ip_alloc = gcp.compute.GlobalAddress("private_ip_alloc", + name="destination-alloydb", + address_type="INTERNAL", + purpose="VPC_PEERING", + prefix_length=16, + network=default.id) + vpc_connection = gcp.servicenetworking.Connection("vpc_connection", + network=default.id, + service="servicenetworking.googleapis.com", + reserved_peering_ranges=[private_ip_alloc.name]) + destination_alloydb_primary = gcp.alloydb.Instance("destination_alloydb_primary", + cluster=destination_alloydb.name, + instance_id="destination-alloydb-primary", + instance_type="PRIMARY", + opts = pulumi.ResourceOptions(depends_on=[vpc_connection])) + existing_alloydb = gcp.databasemigrationservice.ConnectionProfile("existing-alloydb", + location="us-central1", + connection_profile_id="destination-cp", + 
display_name="destination-cp_display", + labels={ + "foo": "bar", + }, + postgresql={ + "alloydb_cluster_id": "destination-alloydb", + }, + opts = pulumi.ResourceOptions(depends_on=[ + destination_alloydb, + destination_alloydb_primary, + ])) + ``` ## Import @@ -991,6 +1094,109 @@ def __init__(__self__, }, opts = pulumi.ResourceOptions(depends_on=[vpc_connection])) ``` + ### Database Migration Service Connection Profile Existing Mysql + + ```python + import pulumi + import pulumi_gcp as gcp + + project = gcp.organizations.get_project() + destination_csql = gcp.sql.DatabaseInstance("destination_csql", + name="destination-csql", + database_version="MYSQL_5_7", + settings={ + "tier": "db-n1-standard-1", + "deletion_protection_enabled": False, + }, + deletion_protection=False) + existing_mysql = gcp.databasemigrationservice.ConnectionProfile("existing-mysql", + location="us-central1", + connection_profile_id="destination-cp", + display_name="destination-cp_display", + labels={ + "foo": "bar", + }, + mysql={ + "cloud_sql_id": "destination-csql", + }, + opts = pulumi.ResourceOptions(depends_on=[destination_csql])) + ``` + ### Database Migration Service Connection Profile Existing Postgres + + ```python + import pulumi + import pulumi_gcp as gcp + + project = gcp.organizations.get_project() + destination_csql = gcp.sql.DatabaseInstance("destination_csql", + name="destination-csql", + database_version="POSTGRES_15", + settings={ + "tier": "db-custom-2-13312", + "deletion_protection_enabled": False, + }, + deletion_protection=False) + existing_psql = gcp.databasemigrationservice.ConnectionProfile("existing-psql", + location="us-central1", + connection_profile_id="destination-cp", + display_name="destination-cp_display", + labels={ + "foo": "bar", + }, + postgresql={ + "cloud_sql_id": "destination-csql", + }, + opts = pulumi.ResourceOptions(depends_on=[destination_csql])) + ``` + ### Database Migration Service Connection Profile Existing Alloydb + + ```python + import pulumi + import pulumi_gcp as gcp + + project = gcp.organizations.get_project() + default = gcp.compute.Network("default", name="destination-alloydb") + destination_alloydb = gcp.alloydb.Cluster("destination_alloydb", + cluster_id="destination-alloydb", + location="us-central1", + network_config={ + "network": default.id, + }, + database_version="POSTGRES_15", + initial_user={ + "user": "destination-alloydb", + "password": "destination-alloydb", + }) + private_ip_alloc = gcp.compute.GlobalAddress("private_ip_alloc", + name="destination-alloydb", + address_type="INTERNAL", + purpose="VPC_PEERING", + prefix_length=16, + network=default.id) + vpc_connection = gcp.servicenetworking.Connection("vpc_connection", + network=default.id, + service="servicenetworking.googleapis.com", + reserved_peering_ranges=[private_ip_alloc.name]) + destination_alloydb_primary = gcp.alloydb.Instance("destination_alloydb_primary", + cluster=destination_alloydb.name, + instance_id="destination-alloydb-primary", + instance_type="PRIMARY", + opts = pulumi.ResourceOptions(depends_on=[vpc_connection])) + existing_alloydb = gcp.databasemigrationservice.ConnectionProfile("existing-alloydb", + location="us-central1", + connection_profile_id="destination-cp", + display_name="destination-cp_display", + labels={ + "foo": "bar", + }, + postgresql={ + "alloydb_cluster_id": "destination-alloydb", + }, + opts = pulumi.ResourceOptions(depends_on=[ + destination_alloydb, + destination_alloydb_primary, + ])) + ``` ## Import diff --git 
a/sdk/python/pulumi_gcp/databasemigrationservice/outputs.py b/sdk/python/pulumi_gcp/databasemigrationservice/outputs.py index 021614dc22..f5d3210872 100644 --- a/sdk/python/pulumi_gcp/databasemigrationservice/outputs.py +++ b/sdk/python/pulumi_gcp/databasemigrationservice/outputs.py @@ -932,78 +932,66 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, - host: str, - password: str, - port: int, - username: str, cloud_sql_id: Optional[str] = None, + host: Optional[str] = None, + password: Optional[str] = None, password_set: Optional[bool] = None, - ssl: Optional['outputs.ConnectionProfileMysqlSsl'] = None): + port: Optional[int] = None, + ssl: Optional['outputs.ConnectionProfileMysqlSsl'] = None, + username: Optional[str] = None): """ - :param str host: Required. The IP or hostname of the source MySQL database. - :param str password: Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. + :param str cloud_sql_id: If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. + :param str host: The IP or hostname of the source MySQL database. + :param str password: Input only. The password for the user that Database Migration Service will be using to connect to the database. This field is not returned on request, and the value is encrypted when stored in Database Migration Service. **Note**: This property is sensitive and will not be displayed in the plan. - :param int port: Required. The network port of the source MySQL database. - :param str username: Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - :param str cloud_sql_id: If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. :param bool password_set: (Output) Output only. Indicates If this connection profile password is stored. + :param int port: The network port of the source MySQL database. :param 'ConnectionProfileMysqlSslArgs' ssl: SSL configuration for the destination to connect to the source database. Structure is documented below. + :param str username: The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. """ - pulumi.set(__self__, "host", host) - pulumi.set(__self__, "password", password) - pulumi.set(__self__, "port", port) - pulumi.set(__self__, "username", username) if cloud_sql_id is not None: pulumi.set(__self__, "cloud_sql_id", cloud_sql_id) + if host is not None: + pulumi.set(__self__, "host", host) + if password is not None: + pulumi.set(__self__, "password", password) if password_set is not None: pulumi.set(__self__, "password_set", password_set) + if port is not None: + pulumi.set(__self__, "port", port) if ssl is not None: pulumi.set(__self__, "ssl", ssl) + if username is not None: + pulumi.set(__self__, "username", username) @property - @pulumi.getter - def host(self) -> str: - """ - Required. The IP or hostname of the source MySQL database. - """ - return pulumi.get(self, "host") - - @property - @pulumi.getter - def password(self) -> str: + @pulumi.getter(name="cloudSqlId") + def cloud_sql_id(self) -> Optional[str]: """ - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. 
- This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - **Note**: This property is sensitive and will not be displayed in the plan. + If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. """ - return pulumi.get(self, "password") + return pulumi.get(self, "cloud_sql_id") @property @pulumi.getter - def port(self) -> int: + def host(self) -> Optional[str]: """ - Required. The network port of the source MySQL database. + The IP or hostname of the source MySQL database. """ - return pulumi.get(self, "port") + return pulumi.get(self, "host") @property @pulumi.getter - def username(self) -> str: - """ - Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. - """ - return pulumi.get(self, "username") - - @property - @pulumi.getter(name="cloudSqlId") - def cloud_sql_id(self) -> Optional[str]: + def password(self) -> Optional[str]: """ - If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. + Input only. The password for the user that Database Migration Service will be using to connect to the database. + This field is not returned on request, and the value is encrypted when stored in Database Migration Service. + **Note**: This property is sensitive and will not be displayed in the plan. """ - return pulumi.get(self, "cloud_sql_id") + return pulumi.get(self, "password") @property @pulumi.getter(name="passwordSet") @@ -1014,6 +1002,14 @@ def password_set(self) -> Optional[bool]: """ return pulumi.get(self, "password_set") + @property + @pulumi.getter + def port(self) -> Optional[int]: + """ + The network port of the source MySQL database. + """ + return pulumi.get(self, "port") + @property @pulumi.getter def ssl(self) -> Optional['outputs.ConnectionProfileMysqlSsl']: @@ -1023,6 +1019,14 @@ def ssl(self) -> Optional['outputs.ConnectionProfileMysqlSsl']: """ return pulumi.get(self, "ssl") + @property + @pulumi.getter + def username(self) -> Optional[str]: + """ + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + """ + return pulumi.get(self, "username") + @pulumi.output_type class ConnectionProfileMysqlSsl(dict): @@ -1494,7 +1498,9 @@ class ConnectionProfilePostgresql(dict): @staticmethod def __key_warning(key: str): suggest = None - if key == "cloudSqlId": + if key == "alloydbClusterId": + suggest = "alloydb_cluster_id" + elif key == "cloudSqlId": suggest = "cloud_sql_id" elif key == "networkArchitecture": suggest = "network_architecture" @@ -1513,83 +1519,73 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, - host: str, - password: str, - port: int, - username: str, + alloydb_cluster_id: Optional[str] = None, cloud_sql_id: Optional[str] = None, + host: Optional[str] = None, network_architecture: Optional[str] = None, + password: Optional[str] = None, password_set: Optional[bool] = None, - ssl: Optional['outputs.ConnectionProfilePostgresqlSsl'] = None): + port: Optional[int] = None, + ssl: Optional['outputs.ConnectionProfilePostgresqlSsl'] = None, + username: Optional[str] = None): """ - :param str host: Required. The IP or hostname of the source MySQL database. - :param str password: Required. Input only. 
The password for the user that Database Migration Service will be using to connect to the database. - This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - **Note**: This property is sensitive and will not be displayed in the plan. - :param int port: Required. The network port of the source MySQL database. - :param str username: Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + :param str alloydb_cluster_id: If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. :param str cloud_sql_id: If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. + :param str host: The IP or hostname of the source MySQL database. :param str network_architecture: (Output) Output only. If the source is a Cloud SQL database, this field indicates the network architecture it's associated with. + :param str password: Input only. The password for the user that Database Migration Service will be using to connect to the database. + This field is not returned on request, and the value is encrypted when stored in Database Migration Service. + **Note**: This property is sensitive and will not be displayed in the plan. :param bool password_set: (Output) Output only. Indicates If this connection profile password is stored. + :param int port: The network port of the source MySQL database. :param 'ConnectionProfilePostgresqlSslArgs' ssl: SSL configuration for the destination to connect to the source database. Structure is documented below. + :param str username: The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. """ - pulumi.set(__self__, "host", host) - pulumi.set(__self__, "password", password) - pulumi.set(__self__, "port", port) - pulumi.set(__self__, "username", username) + if alloydb_cluster_id is not None: + pulumi.set(__self__, "alloydb_cluster_id", alloydb_cluster_id) if cloud_sql_id is not None: pulumi.set(__self__, "cloud_sql_id", cloud_sql_id) + if host is not None: + pulumi.set(__self__, "host", host) if network_architecture is not None: pulumi.set(__self__, "network_architecture", network_architecture) + if password is not None: + pulumi.set(__self__, "password", password) if password_set is not None: pulumi.set(__self__, "password_set", password_set) + if port is not None: + pulumi.set(__self__, "port", port) if ssl is not None: pulumi.set(__self__, "ssl", ssl) + if username is not None: + pulumi.set(__self__, "username", username) @property - @pulumi.getter - def host(self) -> str: + @pulumi.getter(name="alloydbClusterId") + def alloydb_cluster_id(self) -> Optional[str]: """ - Required. The IP or hostname of the source MySQL database. + If the connected database is an AlloyDB instance, use this field to provide the AlloyDB cluster ID. """ - return pulumi.get(self, "host") + return pulumi.get(self, "alloydb_cluster_id") @property - @pulumi.getter - def password(self) -> str: - """ - Required. Input only. The password for the user that Database Migration Service will be using to connect to the database. - This field is not returned on request, and the value is encrypted when stored in Database Migration Service. - **Note**: This property is sensitive and will not be displayed in the plan. 
- """ - return pulumi.get(self, "password") - - @property - @pulumi.getter - def port(self) -> int: + @pulumi.getter(name="cloudSqlId") + def cloud_sql_id(self) -> Optional[str]: """ - Required. The network port of the source MySQL database. + If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. """ - return pulumi.get(self, "port") + return pulumi.get(self, "cloud_sql_id") @property @pulumi.getter - def username(self) -> str: + def host(self) -> Optional[str]: """ - Required. The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. + The IP or hostname of the source MySQL database. """ - return pulumi.get(self, "username") - - @property - @pulumi.getter(name="cloudSqlId") - def cloud_sql_id(self) -> Optional[str]: - """ - If the source is a Cloud SQL database, use this field to provide the Cloud SQL instance ID of the source. - """ - return pulumi.get(self, "cloud_sql_id") + return pulumi.get(self, "host") @property @pulumi.getter(name="networkArchitecture") @@ -1600,6 +1596,16 @@ def network_architecture(self) -> Optional[str]: """ return pulumi.get(self, "network_architecture") + @property + @pulumi.getter + def password(self) -> Optional[str]: + """ + Input only. The password for the user that Database Migration Service will be using to connect to the database. + This field is not returned on request, and the value is encrypted when stored in Database Migration Service. + **Note**: This property is sensitive and will not be displayed in the plan. + """ + return pulumi.get(self, "password") + @property @pulumi.getter(name="passwordSet") def password_set(self) -> Optional[bool]: @@ -1609,6 +1615,14 @@ def password_set(self) -> Optional[bool]: """ return pulumi.get(self, "password_set") + @property + @pulumi.getter + def port(self) -> Optional[int]: + """ + The network port of the source MySQL database. + """ + return pulumi.get(self, "port") + @property @pulumi.getter def ssl(self) -> Optional['outputs.ConnectionProfilePostgresqlSsl']: @@ -1618,6 +1632,14 @@ def ssl(self) -> Optional['outputs.ConnectionProfilePostgresqlSsl']: """ return pulumi.get(self, "ssl") + @property + @pulumi.getter + def username(self) -> Optional[str]: + """ + The username that Database Migration Service will use to connect to the database. The value is encrypted when stored in Database Migration Service. 
+ """ + return pulumi.get(self, "username") + @pulumi.output_type class ConnectionProfilePostgresqlSsl(dict): diff --git a/sdk/python/pulumi_gcp/dataloss/_inputs.py b/sdk/python/pulumi_gcp/dataloss/_inputs.py index 7b4d201267..7e382fa1ad 100644 --- a/sdk/python/pulumi_gcp/dataloss/_inputs.py +++ b/sdk/python/pulumi_gcp/dataloss/_inputs.py @@ -425,6 +425,14 @@ 'PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsArgsDict', 'PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArgs', 'PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsConditionArgsDict', + 'PreventionDiscoveryConfigActionTagResourcesArgs', + 'PreventionDiscoveryConfigActionTagResourcesArgsDict', + 'PreventionDiscoveryConfigActionTagResourcesTagConditionArgs', + 'PreventionDiscoveryConfigActionTagResourcesTagConditionArgsDict', + 'PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs', + 'PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgsDict', + 'PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs', + 'PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgsDict', 'PreventionDiscoveryConfigErrorArgs', 'PreventionDiscoveryConfigErrorArgsDict', 'PreventionDiscoveryConfigErrorDetailsArgs', @@ -439,6 +447,8 @@ 'PreventionDiscoveryConfigTargetBigQueryTargetArgsDict', 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs', 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgsDict', + 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs', + 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgsDict', 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgs', 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgsDict', 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadenceArgs', @@ -483,6 +493,8 @@ 'PreventionDiscoveryConfigTargetCloudSqlTargetFilterOthersArgsDict', 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs', 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgsDict', + 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs', + 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgsDict', 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceArgs', 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceArgsDict', 'PreventionDiscoveryConfigTargetCloudStorageTargetArgs', @@ -15366,6 +15378,11 @@ class PreventionDiscoveryConfigActionArgsDict(TypedDict): Publish a message into the Pub/Sub topic. Structure is documented below. """ + tag_resources: NotRequired[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesArgsDict']] + """ + Publish a message into the Pub/Sub topic. + Structure is documented below. 
+ """ elif False: PreventionDiscoveryConfigActionArgsDict: TypeAlias = Mapping[str, Any] @@ -15373,17 +15390,22 @@ class PreventionDiscoveryConfigActionArgsDict(TypedDict): class PreventionDiscoveryConfigActionArgs: def __init__(__self__, *, export_data: Optional[pulumi.Input['PreventionDiscoveryConfigActionExportDataArgs']] = None, - pub_sub_notification: Optional[pulumi.Input['PreventionDiscoveryConfigActionPubSubNotificationArgs']] = None): + pub_sub_notification: Optional[pulumi.Input['PreventionDiscoveryConfigActionPubSubNotificationArgs']] = None, + tag_resources: Optional[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesArgs']] = None): """ :param pulumi.Input['PreventionDiscoveryConfigActionExportDataArgs'] export_data: Export data profiles into a provided location Structure is documented below. :param pulumi.Input['PreventionDiscoveryConfigActionPubSubNotificationArgs'] pub_sub_notification: Publish a message into the Pub/Sub topic. Structure is documented below. + :param pulumi.Input['PreventionDiscoveryConfigActionTagResourcesArgs'] tag_resources: Publish a message into the Pub/Sub topic. + Structure is documented below. """ if export_data is not None: pulumi.set(__self__, "export_data", export_data) if pub_sub_notification is not None: pulumi.set(__self__, "pub_sub_notification", pub_sub_notification) + if tag_resources is not None: + pulumi.set(__self__, "tag_resources", tag_resources) @property @pulumi.getter(name="exportData") @@ -15411,6 +15433,19 @@ def pub_sub_notification(self) -> Optional[pulumi.Input['PreventionDiscoveryConf def pub_sub_notification(self, value: Optional[pulumi.Input['PreventionDiscoveryConfigActionPubSubNotificationArgs']]): pulumi.set(self, "pub_sub_notification", value) + @property + @pulumi.getter(name="tagResources") + def tag_resources(self) -> Optional[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesArgs']]: + """ + Publish a message into the Pub/Sub topic. + Structure is documented below. + """ + return pulumi.get(self, "tag_resources") + + @tag_resources.setter + def tag_resources(self, value: Optional[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesArgs']]): + pulumi.set(self, "tag_resources", value) + if not MYPY: class PreventionDiscoveryConfigActionExportDataArgsDict(TypedDict): @@ -15771,6 +15806,208 @@ def minimum_sensitivity_score(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "minimum_sensitivity_score", value) +if not MYPY: + class PreventionDiscoveryConfigActionTagResourcesArgsDict(TypedDict): + lower_data_risk_to_low: NotRequired[pulumi.Input[bool]] + """ + Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + """ + profile_generations_to_tags: NotRequired[pulumi.Input[Sequence[pulumi.Input[str]]]] + """ + The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. 
To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + """ + tag_conditions: NotRequired[pulumi.Input[Sequence[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionArgsDict']]]] + """ + The tags to associate with different conditions. + Structure is documented below. + """ +elif False: + PreventionDiscoveryConfigActionTagResourcesArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class PreventionDiscoveryConfigActionTagResourcesArgs: + def __init__(__self__, *, + lower_data_risk_to_low: Optional[pulumi.Input[bool]] = None, + profile_generations_to_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + tag_conditions: Optional[pulumi.Input[Sequence[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionArgs']]]] = None): + """ + :param pulumi.Input[bool] lower_data_risk_to_low: Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + :param pulumi.Input[Sequence[pulumi.Input[str]]] profile_generations_to_tags: The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + :param pulumi.Input[Sequence[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionArgs']]] tag_conditions: The tags to associate with different conditions. + Structure is documented below. + """ + if lower_data_risk_to_low is not None: + pulumi.set(__self__, "lower_data_risk_to_low", lower_data_risk_to_low) + if profile_generations_to_tags is not None: + pulumi.set(__self__, "profile_generations_to_tags", profile_generations_to_tags) + if tag_conditions is not None: + pulumi.set(__self__, "tag_conditions", tag_conditions) + + @property + @pulumi.getter(name="lowerDataRiskToLow") + def lower_data_risk_to_low(self) -> Optional[pulumi.Input[bool]]: + """ + Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. 
+ """ + return pulumi.get(self, "lower_data_risk_to_low") + + @lower_data_risk_to_low.setter + def lower_data_risk_to_low(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "lower_data_risk_to_low", value) + + @property + @pulumi.getter(name="profileGenerationsToTags") + def profile_generations_to_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + """ + The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + """ + return pulumi.get(self, "profile_generations_to_tags") + + @profile_generations_to_tags.setter + def profile_generations_to_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "profile_generations_to_tags", value) + + @property + @pulumi.getter(name="tagConditions") + def tag_conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionArgs']]]]: + """ + The tags to associate with different conditions. + Structure is documented below. + """ + return pulumi.get(self, "tag_conditions") + + @tag_conditions.setter + def tag_conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionArgs']]]]): + pulumi.set(self, "tag_conditions", value) + + +if not MYPY: + class PreventionDiscoveryConfigActionTagResourcesTagConditionArgsDict(TypedDict): + sensitivity_score: NotRequired[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgsDict']] + """ + Conditions attaching the tag to a resource on its profile having this sensitivity score. + Structure is documented below. + """ + tag: NotRequired[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgsDict']] + """ + The tag value to attach to resources. + Structure is documented below. + """ +elif False: + PreventionDiscoveryConfigActionTagResourcesTagConditionArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class PreventionDiscoveryConfigActionTagResourcesTagConditionArgs: + def __init__(__self__, *, + sensitivity_score: Optional[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs']] = None, + tag: Optional[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs']] = None): + """ + :param pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs'] sensitivity_score: Conditions attaching the tag to a resource on its profile having this sensitivity score. + Structure is documented below. + :param pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs'] tag: The tag value to attach to resources. + Structure is documented below. 
+ """ + if sensitivity_score is not None: + pulumi.set(__self__, "sensitivity_score", sensitivity_score) + if tag is not None: + pulumi.set(__self__, "tag", tag) + + @property + @pulumi.getter(name="sensitivityScore") + def sensitivity_score(self) -> Optional[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs']]: + """ + Conditions attaching the tag to a resource on its profile having this sensitivity score. + Structure is documented below. + """ + return pulumi.get(self, "sensitivity_score") + + @sensitivity_score.setter + def sensitivity_score(self, value: Optional[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs']]): + pulumi.set(self, "sensitivity_score", value) + + @property + @pulumi.getter + def tag(self) -> Optional[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs']]: + """ + The tag value to attach to resources. + Structure is documented below. + """ + return pulumi.get(self, "tag") + + @tag.setter + def tag(self, value: Optional[pulumi.Input['PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs']]): + pulumi.set(self, "tag", value) + + +if not MYPY: + class PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgsDict(TypedDict): + score: pulumi.Input[str] + """ + The sensitivity score applied to the resource. + Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + """ +elif False: + PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs: + def __init__(__self__, *, + score: pulumi.Input[str]): + """ + :param pulumi.Input[str] score: The sensitivity score applied to the resource. + Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + """ + pulumi.set(__self__, "score", score) + + @property + @pulumi.getter + def score(self) -> pulumi.Input[str]: + """ + The sensitivity score applied to the resource. + Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + """ + return pulumi.get(self, "score") + + @score.setter + def score(self, value: pulumi.Input[str]): + pulumi.set(self, "score", value) + + +if not MYPY: + class PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgsDict(TypedDict): + namespaced_value: NotRequired[pulumi.Input[str]] + """ + The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + """ +elif False: + PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs: + def __init__(__self__, *, + namespaced_value: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] namespaced_value: The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + """ + if namespaced_value is not None: + pulumi.set(__self__, "namespaced_value", namespaced_value) + + @property + @pulumi.getter(name="namespacedValue") + def namespaced_value(self) -> Optional[pulumi.Input[str]]: + """ + The namespaced name for the tag value to attach to resources. 
Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + """ + return pulumi.get(self, "namespaced_value") + + @namespaced_value.setter + def namespaced_value(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "namespaced_value", value) + + if not MYPY: class PreventionDiscoveryConfigErrorArgsDict(TypedDict): details: NotRequired[pulumi.Input['PreventionDiscoveryConfigErrorDetailsArgsDict']] @@ -16206,6 +16443,11 @@ def filter(self, value: Optional[pulumi.Input['PreventionDiscoveryConfigTargetBi if not MYPY: class PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgsDict(TypedDict): + inspect_template_modified_cadence: NotRequired[pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgsDict']] + """ + Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. + """ schema_modified_cadence: NotRequired[pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgsDict']] """ Governs when to update data profiles when a schema is modified @@ -16222,19 +16464,37 @@ class PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgsDict(TypedDict): @pulumi.input_type class PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs: def __init__(__self__, *, + inspect_template_modified_cadence: Optional[pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs']] = None, schema_modified_cadence: Optional[pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgs']] = None, table_modified_cadence: Optional[pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadenceArgs']] = None): """ + :param pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs'] inspect_template_modified_cadence: Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. :param pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgs'] schema_modified_cadence: Governs when to update data profiles when a schema is modified Structure is documented below. :param pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadenceArgs'] table_modified_cadence: Governs when to update profile when a table is modified. Structure is documented below. """ + if inspect_template_modified_cadence is not None: + pulumi.set(__self__, "inspect_template_modified_cadence", inspect_template_modified_cadence) if schema_modified_cadence is not None: pulumi.set(__self__, "schema_modified_cadence", schema_modified_cadence) if table_modified_cadence is not None: pulumi.set(__self__, "table_modified_cadence", table_modified_cadence) + @property + @pulumi.getter(name="inspectTemplateModifiedCadence") + def inspect_template_modified_cadence(self) -> Optional[pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs']]: + """ + Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. 
+ """ + return pulumi.get(self, "inspect_template_modified_cadence") + + @inspect_template_modified_cadence.setter + def inspect_template_modified_cadence(self, value: Optional[pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs']]): + pulumi.set(self, "inspect_template_modified_cadence", value) + @property @pulumi.getter(name="schemaModifiedCadence") def schema_modified_cadence(self) -> Optional[pulumi.Input['PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgs']]: @@ -16262,6 +16522,41 @@ def table_modified_cadence(self, value: Optional[pulumi.Input['PreventionDiscove pulumi.set(self, "table_modified_cadence", value) +if not MYPY: + class PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgsDict(TypedDict): + frequency: NotRequired[pulumi.Input[str]] + """ + How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + """ +elif False: + PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs: + def __init__(__self__, *, + frequency: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] frequency: How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + """ + if frequency is not None: + pulumi.set(__self__, "frequency", frequency) + + @property + @pulumi.getter + def frequency(self) -> Optional[pulumi.Input[str]]: + """ + How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + """ + return pulumi.get(self, "frequency") + + @frequency.setter + def frequency(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "frequency", value) + + if not MYPY: class PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgsDict(TypedDict): frequency: NotRequired[pulumi.Input[str]] @@ -17372,6 +17667,11 @@ def __init__(__self__): if not MYPY: class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgsDict(TypedDict): + inspect_template_modified_cadence: NotRequired[pulumi.Input['PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgsDict']] + """ + Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. + """ refresh_frequency: NotRequired[pulumi.Input[str]] """ Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. 
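Note (not part of the generated diff): the hunks above add two new DLP discovery-config capabilities to the Python SDK — the `tag_resources` action and the `inspect_template_modified_cadence` cadence. Below is a minimal sketch of how they might be wired together. It is only an illustration, not code from this patch: the project, `parent`, inspect-template path, tag value, `status`, and frequency are placeholders, and the surrounding `gcp.dataloss.PreventionDiscoveryConfig` resource arguments are assumed from the existing SDK rather than taken from these hunks.

import pulumi_gcp as gcp

# Sketch only: placeholder names throughout; the service may also require a
# target filter (e.g. other_tables), omitted here for brevity.
discovery_config = gcp.dataloss.PreventionDiscoveryConfig(
    "example-discovery-config",
    parent="projects/my-project/locations/us",          # placeholder project/location
    location="us",
    status="RUNNING",
    inspect_templates=["projects/my-project/locations/us/inspectTemplates/my-template"],
    actions=[gcp.dataloss.PreventionDiscoveryConfigActionArgs(
        # New in this upgrade: attach tags to profiled resources.
        tag_resources=gcp.dataloss.PreventionDiscoveryConfigActionTagResourcesArgs(
            lower_data_risk_to_low=True,
            profile_generations_to_tags=[
                "PROFILE_GENERATION_NEW",
                "PROFILE_GENERATION_UPDATE",
            ],
            tag_conditions=[gcp.dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionArgs(
                sensitivity_score=gcp.dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs(
                    score="SENSITIVITY_HIGH",
                ),
                tag=gcp.dataloss.PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs(
                    namespaced_value="123456/environment/prod",  # placeholder tag value
                ),
            )],
        ),
    )],
    targets=[gcp.dataloss.PreventionDiscoveryConfigTargetArgs(
        big_query_target=gcp.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetArgs(
            cadence=gcp.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetCadenceArgs(
                # New in this upgrade: reprofile when the referenced InspectTemplate changes.
                inspect_template_modified_cadence=gcp.dataloss.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs(
                    frequency="UPDATE_FREQUENCY_DAILY",
                ),
            ),
        ),
    )],
)

The same `InspectTemplateModifiedCadence` shape is also accepted on the Cloud SQL target's `generation_cadence`, as the following hunks show.
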
@@ -17388,19 +17688,37 @@ class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgsDict(Typ @pulumi.input_type class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceArgs: def __init__(__self__, *, + inspect_template_modified_cadence: Optional[pulumi.Input['PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs']] = None, refresh_frequency: Optional[pulumi.Input[str]] = None, schema_modified_cadence: Optional[pulumi.Input['PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceArgs']] = None): """ + :param pulumi.Input['PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs'] inspect_template_modified_cadence: Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. :param pulumi.Input[str] refresh_frequency: Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. :param pulumi.Input['PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceArgs'] schema_modified_cadence: Governs when to update data profiles when a schema is modified Structure is documented below. """ + if inspect_template_modified_cadence is not None: + pulumi.set(__self__, "inspect_template_modified_cadence", inspect_template_modified_cadence) if refresh_frequency is not None: pulumi.set(__self__, "refresh_frequency", refresh_frequency) if schema_modified_cadence is not None: pulumi.set(__self__, "schema_modified_cadence", schema_modified_cadence) + @property + @pulumi.getter(name="inspectTemplateModifiedCadence") + def inspect_template_modified_cadence(self) -> Optional[pulumi.Input['PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs']]: + """ + Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. + """ + return pulumi.get(self, "inspect_template_modified_cadence") + + @inspect_template_modified_cadence.setter + def inspect_template_modified_cadence(self, value: Optional[pulumi.Input['PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs']]): + pulumi.set(self, "inspect_template_modified_cadence", value) + @property @pulumi.getter(name="refreshFrequency") def refresh_frequency(self) -> Optional[pulumi.Input[str]]: @@ -17428,6 +17746,40 @@ def schema_modified_cadence(self, value: Optional[pulumi.Input['PreventionDiscov pulumi.set(self, "schema_modified_cadence", value) +if not MYPY: + class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgsDict(TypedDict): + frequency: pulumi.Input[str] + """ + How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. 
+ """ +elif False: + PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs: + def __init__(__self__, *, + frequency: pulumi.Input[str]): + """ + :param pulumi.Input[str] frequency: How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + """ + pulumi.set(__self__, "frequency", frequency) + + @property + @pulumi.getter + def frequency(self) -> pulumi.Input[str]: + """ + How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + """ + return pulumi.get(self, "frequency") + + @frequency.setter + def frequency(self, value: pulumi.Input[str]): + pulumi.set(self, "frequency", value) + + if not MYPY: class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceArgsDict(TypedDict): frequency: NotRequired[pulumi.Input[str]] diff --git a/sdk/python/pulumi_gcp/dataloss/outputs.py b/sdk/python/pulumi_gcp/dataloss/outputs.py index 5af31f069e..a6b0ce7a87 100644 --- a/sdk/python/pulumi_gcp/dataloss/outputs.py +++ b/sdk/python/pulumi_gcp/dataloss/outputs.py @@ -221,6 +221,10 @@ 'PreventionDiscoveryConfigActionPubSubNotificationPubsubCondition', 'PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressions', 'PreventionDiscoveryConfigActionPubSubNotificationPubsubConditionExpressionsCondition', + 'PreventionDiscoveryConfigActionTagResources', + 'PreventionDiscoveryConfigActionTagResourcesTagCondition', + 'PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore', + 'PreventionDiscoveryConfigActionTagResourcesTagConditionTag', 'PreventionDiscoveryConfigError', 'PreventionDiscoveryConfigErrorDetails', 'PreventionDiscoveryConfigOrgConfig', @@ -228,6 +232,7 @@ 'PreventionDiscoveryConfigTarget', 'PreventionDiscoveryConfigTargetBigQueryTarget', 'PreventionDiscoveryConfigTargetBigQueryTargetCadence', + 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence', 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence', 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadence', 'PreventionDiscoveryConfigTargetBigQueryTargetConditions', @@ -250,6 +255,7 @@ 'PreventionDiscoveryConfigTargetCloudSqlTargetFilterDatabaseResourceReference', 'PreventionDiscoveryConfigTargetCloudSqlTargetFilterOthers', 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence', + 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence', 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence', 'PreventionDiscoveryConfigTargetCloudStorageTarget', 'PreventionDiscoveryConfigTargetCloudStorageTargetConditions', @@ -11231,6 +11237,8 @@ def __key_warning(key: str): suggest = "export_data" elif key == "pubSubNotification": suggest = "pub_sub_notification" + elif key == "tagResources": + suggest = "tag_resources" if suggest: pulumi.log.warn(f"Key '{key}' not found in PreventionDiscoveryConfigAction. 
Access the value via the '{suggest}' property getter instead.") @@ -11245,17 +11253,22 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, export_data: Optional['outputs.PreventionDiscoveryConfigActionExportData'] = None, - pub_sub_notification: Optional['outputs.PreventionDiscoveryConfigActionPubSubNotification'] = None): + pub_sub_notification: Optional['outputs.PreventionDiscoveryConfigActionPubSubNotification'] = None, + tag_resources: Optional['outputs.PreventionDiscoveryConfigActionTagResources'] = None): """ :param 'PreventionDiscoveryConfigActionExportDataArgs' export_data: Export data profiles into a provided location Structure is documented below. :param 'PreventionDiscoveryConfigActionPubSubNotificationArgs' pub_sub_notification: Publish a message into the Pub/Sub topic. Structure is documented below. + :param 'PreventionDiscoveryConfigActionTagResourcesArgs' tag_resources: Publish a message into the Pub/Sub topic. + Structure is documented below. """ if export_data is not None: pulumi.set(__self__, "export_data", export_data) if pub_sub_notification is not None: pulumi.set(__self__, "pub_sub_notification", pub_sub_notification) + if tag_resources is not None: + pulumi.set(__self__, "tag_resources", tag_resources) @property @pulumi.getter(name="exportData") @@ -11275,6 +11288,15 @@ def pub_sub_notification(self) -> Optional['outputs.PreventionDiscoveryConfigAct """ return pulumi.get(self, "pub_sub_notification") + @property + @pulumi.getter(name="tagResources") + def tag_resources(self) -> Optional['outputs.PreventionDiscoveryConfigActionTagResources']: + """ + Publish a message into the Pub/Sub topic. + Structure is documented below. + """ + return pulumi.get(self, "tag_resources") + @pulumi.output_type class PreventionDiscoveryConfigActionExportData(dict): @@ -11585,6 +11607,182 @@ def minimum_sensitivity_score(self) -> Optional[str]: return pulumi.get(self, "minimum_sensitivity_score") +@pulumi.output_type +class PreventionDiscoveryConfigActionTagResources(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "lowerDataRiskToLow": + suggest = "lower_data_risk_to_low" + elif key == "profileGenerationsToTags": + suggest = "profile_generations_to_tags" + elif key == "tagConditions": + suggest = "tag_conditions" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in PreventionDiscoveryConfigActionTagResources. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + PreventionDiscoveryConfigActionTagResources.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + PreventionDiscoveryConfigActionTagResources.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + lower_data_risk_to_low: Optional[bool] = None, + profile_generations_to_tags: Optional[Sequence[str]] = None, + tag_conditions: Optional[Sequence['outputs.PreventionDiscoveryConfigActionTagResourcesTagCondition']] = None): + """ + :param bool lower_data_risk_to_low: Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. 
For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + :param Sequence[str] profile_generations_to_tags: The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + :param Sequence['PreventionDiscoveryConfigActionTagResourcesTagConditionArgs'] tag_conditions: The tags to associate with different conditions. + Structure is documented below. + """ + if lower_data_risk_to_low is not None: + pulumi.set(__self__, "lower_data_risk_to_low", lower_data_risk_to_low) + if profile_generations_to_tags is not None: + pulumi.set(__self__, "profile_generations_to_tags", profile_generations_to_tags) + if tag_conditions is not None: + pulumi.set(__self__, "tag_conditions", tag_conditions) + + @property + @pulumi.getter(name="lowerDataRiskToLow") + def lower_data_risk_to_low(self) -> Optional[bool]: + """ + Whether applying a tag to a resource should lower the risk of the profile for that resource. For example, in conjunction with an [IAM deny policy](https://cloud.google.com/iam/docs/deny-overview), you can deny all principals a permission if a tag value is present, mitigating the risk of the resource. This also lowers the data risk of resources at the lower levels of the resource hierarchy. For example, reducing the data risk of a table data profile also reduces the data risk of the constituent column data profiles. + """ + return pulumi.get(self, "lower_data_risk_to_low") + + @property + @pulumi.getter(name="profileGenerationsToTags") + def profile_generations_to_tags(self) -> Optional[Sequence[str]]: + """ + The profile generations for which the tag should be attached to resources. If you attach a tag to only new profiles, then if the sensitivity score of a profile subsequently changes, its tag doesn't change. By default, this field includes only new profiles. To include both new and updated profiles for tagging, this field should explicitly include both `PROFILE_GENERATION_NEW` and `PROFILE_GENERATION_UPDATE`. + Each value may be one of: `PROFILE_GENERATION_NEW`, `PROFILE_GENERATION_UPDATE`. + """ + return pulumi.get(self, "profile_generations_to_tags") + + @property + @pulumi.getter(name="tagConditions") + def tag_conditions(self) -> Optional[Sequence['outputs.PreventionDiscoveryConfigActionTagResourcesTagCondition']]: + """ + The tags to associate with different conditions. + Structure is documented below. + """ + return pulumi.get(self, "tag_conditions") + + +@pulumi.output_type +class PreventionDiscoveryConfigActionTagResourcesTagCondition(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "sensitivityScore": + suggest = "sensitivity_score" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in PreventionDiscoveryConfigActionTagResourcesTagCondition. 
Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + PreventionDiscoveryConfigActionTagResourcesTagCondition.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + PreventionDiscoveryConfigActionTagResourcesTagCondition.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + sensitivity_score: Optional['outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore'] = None, + tag: Optional['outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionTag'] = None): + """ + :param 'PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScoreArgs' sensitivity_score: Conditions attaching the tag to a resource on its profile having this sensitivity score. + Structure is documented below. + :param 'PreventionDiscoveryConfigActionTagResourcesTagConditionTagArgs' tag: The tag value to attach to resources. + Structure is documented below. + """ + if sensitivity_score is not None: + pulumi.set(__self__, "sensitivity_score", sensitivity_score) + if tag is not None: + pulumi.set(__self__, "tag", tag) + + @property + @pulumi.getter(name="sensitivityScore") + def sensitivity_score(self) -> Optional['outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore']: + """ + Conditions attaching the tag to a resource on its profile having this sensitivity score. + Structure is documented below. + """ + return pulumi.get(self, "sensitivity_score") + + @property + @pulumi.getter + def tag(self) -> Optional['outputs.PreventionDiscoveryConfigActionTagResourcesTagConditionTag']: + """ + The tag value to attach to resources. + Structure is documented below. + """ + return pulumi.get(self, "tag") + + +@pulumi.output_type +class PreventionDiscoveryConfigActionTagResourcesTagConditionSensitivityScore(dict): + def __init__(__self__, *, + score: str): + """ + :param str score: The sensitivity score applied to the resource. + Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + """ + pulumi.set(__self__, "score", score) + + @property + @pulumi.getter + def score(self) -> str: + """ + The sensitivity score applied to the resource. + Possible values are: `SENSITIVITY_LOW`, `SENSITIVITY_MODERATE`, `SENSITIVITY_HIGH`. + """ + return pulumi.get(self, "score") + + +@pulumi.output_type +class PreventionDiscoveryConfigActionTagResourcesTagConditionTag(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "namespacedValue": + suggest = "namespaced_value" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in PreventionDiscoveryConfigActionTagResourcesTagConditionTag. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + PreventionDiscoveryConfigActionTagResourcesTagConditionTag.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + PreventionDiscoveryConfigActionTagResourcesTagConditionTag.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + namespaced_value: Optional[str] = None): + """ + :param str namespaced_value: The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". 
+ """ + if namespaced_value is not None: + pulumi.set(__self__, "namespaced_value", namespaced_value) + + @property + @pulumi.getter(name="namespacedValue") + def namespaced_value(self) -> Optional[str]: + """ + The namespaced name for the tag value to attach to resources. Must be in the format `{parent_id}/{tag_key_short_name}/{short_name}`, for example, "123456/environment/prod". + """ + return pulumi.get(self, "namespaced_value") + + @pulumi.output_type class PreventionDiscoveryConfigError(dict): def __init__(__self__, *, @@ -11909,7 +12107,9 @@ class PreventionDiscoveryConfigTargetBigQueryTargetCadence(dict): @staticmethod def __key_warning(key: str): suggest = None - if key == "schemaModifiedCadence": + if key == "inspectTemplateModifiedCadence": + suggest = "inspect_template_modified_cadence" + elif key == "schemaModifiedCadence": suggest = "schema_modified_cadence" elif key == "tableModifiedCadence": suggest = "table_modified_cadence" @@ -11926,19 +12126,33 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, + inspect_template_modified_cadence: Optional['outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence'] = None, schema_modified_cadence: Optional['outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence'] = None, table_modified_cadence: Optional['outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadence'] = None): """ + :param 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadenceArgs' inspect_template_modified_cadence: Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. :param 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadenceArgs' schema_modified_cadence: Governs when to update data profiles when a schema is modified Structure is documented below. :param 'PreventionDiscoveryConfigTargetBigQueryTargetCadenceTableModifiedCadenceArgs' table_modified_cadence: Governs when to update profile when a table is modified. Structure is documented below. """ + if inspect_template_modified_cadence is not None: + pulumi.set(__self__, "inspect_template_modified_cadence", inspect_template_modified_cadence) if schema_modified_cadence is not None: pulumi.set(__self__, "schema_modified_cadence", schema_modified_cadence) if table_modified_cadence is not None: pulumi.set(__self__, "table_modified_cadence", table_modified_cadence) + @property + @pulumi.getter(name="inspectTemplateModifiedCadence") + def inspect_template_modified_cadence(self) -> Optional['outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence']: + """ + Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. 
+ """ + return pulumi.get(self, "inspect_template_modified_cadence") + @property @pulumi.getter(name="schemaModifiedCadence") def schema_modified_cadence(self) -> Optional['outputs.PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence']: @@ -11958,6 +12172,27 @@ def table_modified_cadence(self) -> Optional['outputs.PreventionDiscoveryConfigT return pulumi.get(self, "table_modified_cadence") +@pulumi.output_type +class PreventionDiscoveryConfigTargetBigQueryTargetCadenceInspectTemplateModifiedCadence(dict): + def __init__(__self__, *, + frequency: Optional[str] = None): + """ + :param str frequency: How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + """ + if frequency is not None: + pulumi.set(__self__, "frequency", frequency) + + @property + @pulumi.getter + def frequency(self) -> Optional[str]: + """ + How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + """ + return pulumi.get(self, "frequency") + + @pulumi.output_type class PreventionDiscoveryConfigTargetBigQueryTargetCadenceSchemaModifiedCadence(dict): def __init__(__self__, *, @@ -12847,7 +13082,9 @@ class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadence(dict): @staticmethod def __key_warning(key: str): suggest = None - if key == "refreshFrequency": + if key == "inspectTemplateModifiedCadence": + suggest = "inspect_template_modified_cadence" + elif key == "refreshFrequency": suggest = "refresh_frequency" elif key == "schemaModifiedCadence": suggest = "schema_modified_cadence" @@ -12864,19 +13101,33 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, + inspect_template_modified_cadence: Optional['outputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence'] = None, refresh_frequency: Optional[str] = None, schema_modified_cadence: Optional['outputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence'] = None): """ + :param 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadenceArgs' inspect_template_modified_cadence: Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. :param str refresh_frequency: Data changes in Cloud Storage can't trigger reprofiling. If you set this field, profiles are refreshed at this frequency regardless of whether the underlying buckets have changes. Defaults to never. Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. :param 'PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadenceArgs' schema_modified_cadence: Governs when to update data profiles when a schema is modified Structure is documented below. 
""" + if inspect_template_modified_cadence is not None: + pulumi.set(__self__, "inspect_template_modified_cadence", inspect_template_modified_cadence) if refresh_frequency is not None: pulumi.set(__self__, "refresh_frequency", refresh_frequency) if schema_modified_cadence is not None: pulumi.set(__self__, "schema_modified_cadence", schema_modified_cadence) + @property + @pulumi.getter(name="inspectTemplateModifiedCadence") + def inspect_template_modified_cadence(self) -> Optional['outputs.PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence']: + """ + Governs when to update data profiles when the inspection rules defined by the `InspectTemplate` change. If not set, changing the template will not cause a data profile to update. + Structure is documented below. + """ + return pulumi.get(self, "inspect_template_modified_cadence") + @property @pulumi.getter(name="refreshFrequency") def refresh_frequency(self) -> Optional[str]: @@ -12896,6 +13147,26 @@ def schema_modified_cadence(self) -> Optional['outputs.PreventionDiscoveryConfig return pulumi.get(self, "schema_modified_cadence") +@pulumi.output_type +class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceInspectTemplateModifiedCadence(dict): + def __init__(__self__, *, + frequency: str): + """ + :param str frequency: How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + """ + pulumi.set(__self__, "frequency", frequency) + + @property + @pulumi.getter + def frequency(self) -> str: + """ + How frequently data profiles can be updated when the template is modified. Defaults to never. + Possible values are: `UPDATE_FREQUENCY_NEVER`, `UPDATE_FREQUENCY_DAILY`, `UPDATE_FREQUENCY_MONTHLY`. + """ + return pulumi.get(self, "frequency") + + @pulumi.output_type class PreventionDiscoveryConfigTargetCloudSqlTargetGenerationCadenceSchemaModifiedCadence(dict): def __init__(__self__, *, diff --git a/sdk/python/pulumi_gcp/dataproc/_inputs.py b/sdk/python/pulumi_gcp/dataproc/_inputs.py index f68ffdc149..d03a3f2b48 100644 --- a/sdk/python/pulumi_gcp/dataproc/_inputs.py +++ b/sdk/python/pulumi_gcp/dataproc/_inputs.py @@ -10288,7 +10288,7 @@ class WorkflowTemplatePlacementManagedClusterConfigArgsDict(TypedDict): """ staging_bucket: NotRequired[pulumi.Input[str]] """ - A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). 
""" temp_bucket: NotRequired[pulumi.Input[str]] """ @@ -10334,7 +10334,7 @@ def __init__(__self__, *, :param pulumi.Input['WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs'] secondary_worker_config: The Compute Engine config settings for additional worker instances in a cluster. :param pulumi.Input['WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs'] security_config: Security settings for the cluster. :param pulumi.Input['WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs'] software_config: The config settings for software inside the cluster. - :param pulumi.Input[str] staging_bucket: A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + :param pulumi.Input[str] staging_bucket: A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). :param pulumi.Input[str] temp_bucket: A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. :param pulumi.Input['WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs'] worker_config: The Compute Engine config settings for additional worker instances in a cluster. @@ -10519,7 +10519,7 @@ def software_config(self, value: Optional[pulumi.Input['WorkflowTemplatePlacemen @pulumi.getter(name="stagingBucket") def staging_bucket(self) -> Optional[pulumi.Input[str]]: """ - A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. 
If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). """ return pulumi.get(self, "staging_bucket") @@ -10678,7 +10678,7 @@ class WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgsDict(Type """ metadata: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[str]]]] """ - The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). """ network: NotRequired[pulumi.Input[str]] """ @@ -10714,7 +10714,7 @@ class WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgsDict(Type """ tags: NotRequired[pulumi.Input[Sequence[pulumi.Input[str]]]] """ - The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). """ zone: NotRequired[pulumi.Input[str]] """ @@ -10740,7 +10740,7 @@ def __init__(__self__, *, zone: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[bool] internal_ip_only: If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. - :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). :param pulumi.Input[str] network: The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see /regions/global/default` * `default` :param pulumi.Input['WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs'] node_group_affinity: Node Group Affinity for sole-tenant clusters. :param pulumi.Input[str] private_ipv6_google_access: The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL @@ -10749,7 +10749,7 @@ def __init__(__self__, *, :param pulumi.Input[Sequence[pulumi.Input[str]]] service_account_scopes: The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control :param pulumi.Input['WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm). Structure defined below. :param pulumi.Input[str] subnetwork: The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0` * `sub0` - :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). :param pulumi.Input[str] zone: The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f` """ if internal_ip_only is not None: @@ -10793,7 +10793,7 @@ def internal_ip_only(self, value: Optional[pulumi.Input[bool]]): @pulumi.getter def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ - The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). """ return pulumi.get(self, "metadata") @@ -10901,7 +10901,7 @@ def subnetwork(self, value: Optional[pulumi.Input[str]]): @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ - The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). """ return pulumi.get(self, "tags") @@ -11189,7 +11189,7 @@ class WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgsDict( """ execution_timeout: NotRequired[pulumi.Input[str]] """ - Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + Amount of time executable has to complete. 
Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. """ elif False: WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgsDict: TypeAlias = Mapping[str, Any] @@ -11201,7 +11201,7 @@ def __init__(__self__, *, execution_timeout: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] executable_file: Required. Cloud Storage URI of executable file. - :param pulumi.Input[str] execution_timeout: Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + :param pulumi.Input[str] execution_timeout: Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. """ if executable_file is not None: pulumi.set(__self__, "executable_file", executable_file) @@ -11224,7 +11224,7 @@ def executable_file(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="executionTimeout") def execution_timeout(self) -> Optional[pulumi.Input[str]]: """ - Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. """ return pulumi.get(self, "execution_timeout") @@ -11237,19 +11237,19 @@ def execution_timeout(self, value: Optional[pulumi.Input[str]]): class WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgsDict(TypedDict): auto_delete_time: NotRequired[pulumi.Input[str]] """ - The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ auto_delete_ttl: NotRequired[pulumi.Input[str]] """ - The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + The lifetime duration of cluster. 
The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ idle_delete_ttl: NotRequired[pulumi.Input[str]] """ - The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). """ idle_start_time: NotRequired[pulumi.Input[str]] """ - Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ elif False: WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgsDict: TypeAlias = Mapping[str, Any] @@ -11262,10 +11262,10 @@ def __init__(__self__, *, idle_delete_ttl: Optional[pulumi.Input[str]] = None, idle_start_time: Optional[pulumi.Input[str]] = None): """ - :param pulumi.Input[str] auto_delete_time: The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). - :param pulumi.Input[str] auto_delete_ttl: The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). - :param pulumi.Input[str] idle_delete_ttl: The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). - :param pulumi.Input[str] idle_start_time: Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + :param pulumi.Input[str] auto_delete_time: The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). + :param pulumi.Input[str] auto_delete_ttl: The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). + :param pulumi.Input[str] idle_delete_ttl: The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. 
Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). + :param pulumi.Input[str] idle_start_time: Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ if auto_delete_time is not None: pulumi.set(__self__, "auto_delete_time", auto_delete_time) @@ -11280,7 +11280,7 @@ def __init__(__self__, *, @pulumi.getter(name="autoDeleteTime") def auto_delete_time(self) -> Optional[pulumi.Input[str]]: """ - The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ return pulumi.get(self, "auto_delete_time") @@ -11292,7 +11292,7 @@ def auto_delete_time(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="autoDeleteTtl") def auto_delete_ttl(self) -> Optional[pulumi.Input[str]]: """ - The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ return pulumi.get(self, "auto_delete_ttl") @@ -11304,7 +11304,7 @@ def auto_delete_ttl(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="idleDeleteTtl") def idle_delete_ttl(self) -> Optional[pulumi.Input[str]]: """ - The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). """ return pulumi.get(self, "idle_delete_ttl") @@ -11316,7 +11316,7 @@ def idle_delete_ttl(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="idleStartTime") def idle_start_time(self) -> Optional[pulumi.Input[str]]: """ - Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). 
""" return pulumi.get(self, "idle_start_time") @@ -11357,7 +11357,7 @@ class WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgsDict(TypedDic """ min_cpu_platform: NotRequired[pulumi.Input[str]] """ - Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). """ num_instances: NotRequired[pulumi.Input[int]] """ @@ -11391,7 +11391,7 @@ def __init__(__self__, *, :param pulumi.Input[bool] is_preemptible: Output only. Specifies that this instance group contains preemptible instances. :param pulumi.Input[str] machine_type: The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. :param pulumi.Input[Sequence[pulumi.Input['WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs']]] managed_group_configs: Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. - :param pulumi.Input[str] min_cpu_platform: Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + :param pulumi.Input[str] min_cpu_platform: Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). :param pulumi.Input[int] num_instances: The number of VM instances in the instance group. For master instance groups, must be set to 1. :param pulumi.Input[str] preemptibility: Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE """ @@ -11504,7 +11504,7 @@ def managed_group_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Inp @pulumi.getter(name="minCpuPlatform") def min_cpu_platform(self) -> Optional[pulumi.Input[str]]: """ - Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). """ return pulumi.get(self, "min_cpu_platform") diff --git a/sdk/python/pulumi_gcp/dataproc/outputs.py b/sdk/python/pulumi_gcp/dataproc/outputs.py index 81f2f20c08..966f5e493b 100644 --- a/sdk/python/pulumi_gcp/dataproc/outputs.py +++ b/sdk/python/pulumi_gcp/dataproc/outputs.py @@ -8232,7 +8232,7 @@ def __init__(__self__, *, :param 'WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs' secondary_worker_config: The Compute Engine config settings for additional worker instances in a cluster. :param 'WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs' security_config: Security settings for the cluster. 
:param 'WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs' software_config: The config settings for software inside the cluster. - :param str staging_bucket: A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + :param str staging_bucket: A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). :param str temp_bucket: A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. :param 'WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs' worker_config: The Compute Engine config settings for additional worker instances in a cluster. @@ -8369,7 +8369,7 @@ def software_config(self) -> Optional['outputs.WorkflowTemplatePlacementManagedC @pulumi.getter(name="stagingBucket") def staging_bucket(self) -> Optional[str]: """ - A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). + A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging and temp buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). """ return pulumi.get(self, "staging_bucket") @@ -8543,7 +8543,7 @@ def __init__(__self__, *, zone: Optional[str] = None): """ :param bool internal_ip_only: If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. 
This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. - :param Mapping[str, str] metadata: The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + :param Mapping[str, str] metadata: The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). :param str network: The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see /regions/global/default` * `default` :param 'WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinityArgs' node_group_affinity: Node Group Affinity for sole-tenant clusters. :param str private_ipv6_google_access: The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL @@ -8552,7 +8552,7 @@ def __init__(__self__, *, :param Sequence[str] service_account_scopes: The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control :param 'WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance Config for clusters using [Compute Engine Shielded VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm). Structure defined below. :param str subnetwork: The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects//regions/us-east1/subnetworks/sub0` * `sub0` - :param Sequence[str] tags: The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + :param Sequence[str] tags: The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). :param str zone: The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/` * `us-central1-f` """ if internal_ip_only is not None: @@ -8592,7 +8592,7 @@ def internal_ip_only(self) -> Optional[bool]: @pulumi.getter def metadata(self) -> Optional[Mapping[str, str]]: """ - The Compute Engine metadata entries to add to all instances (see (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). + The Compute Engine metadata entries to add to all instances (see [About VM metadata](https://cloud.google.com/compute/docs/metadata/overview)). """ return pulumi.get(self, "metadata") @@ -8664,7 +8664,7 @@ def subnetwork(self) -> Optional[str]: @pulumi.getter def tags(self) -> Optional[Sequence[str]]: """ - The Compute Engine tags to add to all instances (see (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). + The Compute Engine tags to add to all instances (see [Manage tags for resources](https://cloud.google.com/compute/docs/tag-resources)). """ return pulumi.get(self, "tags") @@ -8948,7 +8948,7 @@ def __init__(__self__, *, execution_timeout: Optional[str] = None): """ :param str executable_file: Required. Cloud Storage URI of executable file. - :param str execution_timeout: Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + :param str execution_timeout: Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. """ if executable_file is not None: pulumi.set(__self__, "executable_file", executable_file) @@ -8967,7 +8967,7 @@ def executable_file(self) -> Optional[str]: @pulumi.getter(name="executionTimeout") def execution_timeout(self) -> Optional[str]: """ - Amount of time executable has to complete. Default is 10 minutes (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. + Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. """ return pulumi.get(self, "execution_timeout") @@ -9003,10 +9003,10 @@ def __init__(__self__, *, idle_delete_ttl: Optional[str] = None, idle_start_time: Optional[str] = None): """ - :param str auto_delete_time: The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). - :param str auto_delete_ttl: The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. 
Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). - :param str idle_delete_ttl: The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). - :param str idle_start_time: Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + :param str auto_delete_time: The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). + :param str auto_delete_ttl: The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). + :param str idle_delete_ttl: The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). + :param str idle_start_time: Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ if auto_delete_time is not None: pulumi.set(__self__, "auto_delete_time", auto_delete_time) @@ -9021,7 +9021,7 @@ def __init__(__self__, *, @pulumi.getter(name="autoDeleteTime") def auto_delete_time(self) -> Optional[str]: """ - The time when cluster will be auto-deleted (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + The time when cluster will be auto-deleted (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ return pulumi.get(self, "auto_delete_time") @@ -9029,7 +9029,7 @@ def auto_delete_time(self) -> Optional[str]: @pulumi.getter(name="autoDeleteTtl") def auto_delete_ttl(self) -> Optional[str]: """ - The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ return pulumi.get(self, "auto_delete_ttl") @@ -9037,7 +9037,7 @@ def auto_delete_ttl(self) -> Optional[str]: @pulumi.getter(name="idleDeleteTtl") def idle_delete_ttl(self) -> Optional[str]: """ - The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. 
Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json). + The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json). """ return pulumi.get(self, "idle_delete_ttl") @@ -9045,7 +9045,7 @@ def idle_delete_ttl(self) -> Optional[str]: @pulumi.getter(name="idleStartTime") def idle_start_time(self) -> Optional[str]: """ - Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of (https://developers.google.com/protocol-buffers/docs/proto3#json)). + Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [JSON Mapping - Language Guide (proto 3)](https://developers.google.com/protocol-buffers/docs/proto3#json)). """ return pulumi.get(self, "idle_start_time") @@ -9100,7 +9100,7 @@ def __init__(__self__, *, :param bool is_preemptible: Output only. Specifies that this instance group contains preemptible instances. :param str machine_type: The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/(https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. :param Sequence['WorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigArgs'] managed_group_configs: Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. - :param str min_cpu_platform: Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + :param str min_cpu_platform: Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). :param int num_instances: The number of VM instances in the instance group. For master instance groups, must be set to 1. :param str preemptibility: Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE """ @@ -9185,7 +9185,7 @@ def managed_group_configs(self) -> Optional[Sequence['outputs.WorkflowTemplatePl @pulumi.getter(name="minCpuPlatform") def min_cpu_platform(self) -> Optional[str]: """ - Specifies the minimum cpu platform for the Instance Group. See (https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). + Specifies the minimum cpu platform for the Instance Group. See [Minimum CPU platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). 
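Taken together, the Dataproc fields whose doc links are touched above (staging_bucket, the GCE metadata and tags, initialization-action execution_timeout, the lifecycle TTLs, and min_cpu_platform) all hang off a workflow template's managed-cluster config. A minimal sketch of how they compose; the bucket, zone, tags, init script, and job below are placeholders rather than values from this patch, and durations follow the protobuf JSON duration mapping referenced in the docs above:

```python
import pulumi_gcp as gcp

template = gcp.dataproc.WorkflowTemplate("template",
    name="template-example",
    location="us-central1",
    placement={
        "managed_cluster": {
            "cluster_name": "my-cluster",
            "config": {
                # If omitted, Dataproc creates and manages a per-location staging bucket.
                "staging_bucket": "my-dataproc-staging-bucket",
                "gce_cluster_config": {
                    "zone": "us-central1-f",
                    # Metadata entries and network tags applied to every cluster instance.
                    "metadata": {"enable-oslogin": "TRUE"},
                    "tags": ["dataproc", "workflow"],
                },
                "master_config": {
                    "num_instances": 1,
                    "machine_type": "n1-standard-2",
                    # Minimum CPU platform for the instance group.
                    "min_cpu_platform": "Intel Skylake",
                },
                "lifecycle_config": {
                    # Protobuf JSON durations, e.g. "3600s".
                    "auto_delete_ttl": "14400s",   # delete the cluster 4 hours after creation
                    "idle_delete_ttl": "1800s",    # delete after 30 minutes of idleness
                },
                "initialization_actions": [{
                    "executable_file": "gs://my-bucket/init.sh",  # placeholder script
                    "execution_timeout": "600s",                  # default is 10 minutes
                }],
            },
        },
    },
    jobs=[{
        "step_id": "spark-step",
        "spark_job": {"main_class": "org.apache.spark.examples.SparkPi"},
    }])
```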
""" return pulumi.get(self, "min_cpu_platform") diff --git a/sdk/python/pulumi_gcp/datastream/_inputs.py b/sdk/python/pulumi_gcp/datastream/_inputs.py index cc18e71418..3ae582d401 100644 --- a/sdk/python/pulumi_gcp/datastream/_inputs.py +++ b/sdk/python/pulumi_gcp/datastream/_inputs.py @@ -155,6 +155,8 @@ 'StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumnArgsDict', 'StreamSourceConfigSqlServerSourceConfigArgs', 'StreamSourceConfigSqlServerSourceConfigArgsDict', + 'StreamSourceConfigSqlServerSourceConfigChangeTablesArgs', + 'StreamSourceConfigSqlServerSourceConfigChangeTablesArgsDict', 'StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgs', 'StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgsDict', 'StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemaArgs', @@ -171,6 +173,8 @@ 'StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableArgsDict', 'StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnArgs', 'StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumnArgsDict', + 'StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs', + 'StreamSourceConfigSqlServerSourceConfigTransactionLogsArgsDict', ] MYPY = False @@ -5645,6 +5649,10 @@ def scale(self, value: Optional[pulumi.Input[int]]): if not MYPY: class StreamSourceConfigSqlServerSourceConfigArgsDict(TypedDict): + change_tables: NotRequired[pulumi.Input['StreamSourceConfigSqlServerSourceConfigChangeTablesArgsDict']] + """ + CDC reader reads from change tables. + """ exclude_objects: NotRequired[pulumi.Input['StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgsDict']] """ SQL Server objects to exclude from the stream. @@ -5663,24 +5671,34 @@ class StreamSourceConfigSqlServerSourceConfigArgsDict(TypedDict): """ Max concurrent CDC tasks. """ + transaction_logs: NotRequired[pulumi.Input['StreamSourceConfigSqlServerSourceConfigTransactionLogsArgsDict']] + """ + CDC reader reads from transaction logs. + """ elif False: StreamSourceConfigSqlServerSourceConfigArgsDict: TypeAlias = Mapping[str, Any] @pulumi.input_type class StreamSourceConfigSqlServerSourceConfigArgs: def __init__(__self__, *, + change_tables: Optional[pulumi.Input['StreamSourceConfigSqlServerSourceConfigChangeTablesArgs']] = None, exclude_objects: Optional[pulumi.Input['StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgs']] = None, include_objects: Optional[pulumi.Input['StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs']] = None, max_concurrent_backfill_tasks: Optional[pulumi.Input[int]] = None, - max_concurrent_cdc_tasks: Optional[pulumi.Input[int]] = None): + max_concurrent_cdc_tasks: Optional[pulumi.Input[int]] = None, + transaction_logs: Optional[pulumi.Input['StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs']] = None): """ + :param pulumi.Input['StreamSourceConfigSqlServerSourceConfigChangeTablesArgs'] change_tables: CDC reader reads from change tables. :param pulumi.Input['StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgs'] exclude_objects: SQL Server objects to exclude from the stream. Structure is documented below. :param pulumi.Input['StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs'] include_objects: SQL Server objects to retrieve from the source. Structure is documented below. :param pulumi.Input[int] max_concurrent_backfill_tasks: Max concurrent backfill tasks. :param pulumi.Input[int] max_concurrent_cdc_tasks: Max concurrent CDC tasks. 
+ :param pulumi.Input['StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs'] transaction_logs: CDC reader reads from transaction logs. """ + if change_tables is not None: + pulumi.set(__self__, "change_tables", change_tables) if exclude_objects is not None: pulumi.set(__self__, "exclude_objects", exclude_objects) if include_objects is not None: @@ -5689,6 +5707,20 @@ def __init__(__self__, *, pulumi.set(__self__, "max_concurrent_backfill_tasks", max_concurrent_backfill_tasks) if max_concurrent_cdc_tasks is not None: pulumi.set(__self__, "max_concurrent_cdc_tasks", max_concurrent_cdc_tasks) + if transaction_logs is not None: + pulumi.set(__self__, "transaction_logs", transaction_logs) + + @property + @pulumi.getter(name="changeTables") + def change_tables(self) -> Optional[pulumi.Input['StreamSourceConfigSqlServerSourceConfigChangeTablesArgs']]: + """ + CDC reader reads from change tables. + """ + return pulumi.get(self, "change_tables") + + @change_tables.setter + def change_tables(self, value: Optional[pulumi.Input['StreamSourceConfigSqlServerSourceConfigChangeTablesArgs']]): + pulumi.set(self, "change_tables", value) @property @pulumi.getter(name="excludeObjects") @@ -5740,6 +5772,30 @@ def max_concurrent_cdc_tasks(self) -> Optional[pulumi.Input[int]]: def max_concurrent_cdc_tasks(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "max_concurrent_cdc_tasks", value) + @property + @pulumi.getter(name="transactionLogs") + def transaction_logs(self) -> Optional[pulumi.Input['StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs']]: + """ + CDC reader reads from transaction logs. + """ + return pulumi.get(self, "transaction_logs") + + @transaction_logs.setter + def transaction_logs(self, value: Optional[pulumi.Input['StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs']]): + pulumi.set(self, "transaction_logs", value) + + +if not MYPY: + class StreamSourceConfigSqlServerSourceConfigChangeTablesArgsDict(TypedDict): + pass +elif False: + StreamSourceConfigSqlServerSourceConfigChangeTablesArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class StreamSourceConfigSqlServerSourceConfigChangeTablesArgs: + def __init__(__self__): + pass + if not MYPY: class StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgsDict(TypedDict): @@ -6411,3 +6467,15 @@ def scale(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "scale", value) +if not MYPY: + class StreamSourceConfigSqlServerSourceConfigTransactionLogsArgsDict(TypedDict): + pass +elif False: + StreamSourceConfigSqlServerSourceConfigTransactionLogsArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs: + def __init__(__self__): + pass + + diff --git a/sdk/python/pulumi_gcp/datastream/outputs.py b/sdk/python/pulumi_gcp/datastream/outputs.py index 26cbce6ae3..c1f28cf97d 100644 --- a/sdk/python/pulumi_gcp/datastream/outputs.py +++ b/sdk/python/pulumi_gcp/datastream/outputs.py @@ -86,6 +86,7 @@ 'StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTable', 'StreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemaPostgresqlTablePostgresqlColumn', 'StreamSourceConfigSqlServerSourceConfig', + 'StreamSourceConfigSqlServerSourceConfigChangeTables', 'StreamSourceConfigSqlServerSourceConfigExcludeObjects', 'StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchema', 'StreamSourceConfigSqlServerSourceConfigExcludeObjectsSchemaTable', @@ -94,6 +95,7 @@ 
'StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchema', 'StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTable', 'StreamSourceConfigSqlServerSourceConfigIncludeObjectsSchemaTableColumn', + 'StreamSourceConfigSqlServerSourceConfigTransactionLogs', ] @pulumi.output_type @@ -4408,7 +4410,9 @@ class StreamSourceConfigSqlServerSourceConfig(dict): @staticmethod def __key_warning(key: str): suggest = None - if key == "excludeObjects": + if key == "changeTables": + suggest = "change_tables" + elif key == "excludeObjects": suggest = "exclude_objects" elif key == "includeObjects": suggest = "include_objects" @@ -4416,6 +4420,8 @@ def __key_warning(key: str): suggest = "max_concurrent_backfill_tasks" elif key == "maxConcurrentCdcTasks": suggest = "max_concurrent_cdc_tasks" + elif key == "transactionLogs": + suggest = "transaction_logs" if suggest: pulumi.log.warn(f"Key '{key}' not found in StreamSourceConfigSqlServerSourceConfig. Access the value via the '{suggest}' property getter instead.") @@ -4429,18 +4435,24 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, + change_tables: Optional['outputs.StreamSourceConfigSqlServerSourceConfigChangeTables'] = None, exclude_objects: Optional['outputs.StreamSourceConfigSqlServerSourceConfigExcludeObjects'] = None, include_objects: Optional['outputs.StreamSourceConfigSqlServerSourceConfigIncludeObjects'] = None, max_concurrent_backfill_tasks: Optional[int] = None, - max_concurrent_cdc_tasks: Optional[int] = None): + max_concurrent_cdc_tasks: Optional[int] = None, + transaction_logs: Optional['outputs.StreamSourceConfigSqlServerSourceConfigTransactionLogs'] = None): """ + :param 'StreamSourceConfigSqlServerSourceConfigChangeTablesArgs' change_tables: CDC reader reads from change tables. :param 'StreamSourceConfigSqlServerSourceConfigExcludeObjectsArgs' exclude_objects: SQL Server objects to exclude from the stream. Structure is documented below. :param 'StreamSourceConfigSqlServerSourceConfigIncludeObjectsArgs' include_objects: SQL Server objects to retrieve from the source. Structure is documented below. :param int max_concurrent_backfill_tasks: Max concurrent backfill tasks. :param int max_concurrent_cdc_tasks: Max concurrent CDC tasks. + :param 'StreamSourceConfigSqlServerSourceConfigTransactionLogsArgs' transaction_logs: CDC reader reads from transaction logs. """ + if change_tables is not None: + pulumi.set(__self__, "change_tables", change_tables) if exclude_objects is not None: pulumi.set(__self__, "exclude_objects", exclude_objects) if include_objects is not None: @@ -4449,6 +4461,16 @@ def __init__(__self__, *, pulumi.set(__self__, "max_concurrent_backfill_tasks", max_concurrent_backfill_tasks) if max_concurrent_cdc_tasks is not None: pulumi.set(__self__, "max_concurrent_cdc_tasks", max_concurrent_cdc_tasks) + if transaction_logs is not None: + pulumi.set(__self__, "transaction_logs", transaction_logs) + + @property + @pulumi.getter(name="changeTables") + def change_tables(self) -> Optional['outputs.StreamSourceConfigSqlServerSourceConfigChangeTables']: + """ + CDC reader reads from change tables. 
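change_tables and transaction_logs are both empty marker blocks on sql_server_source_config that pick the CDC read path for a SQL Server source; the stream.py examples further down show them inside complete streams. A minimal sketch of the two source_config shapes, assuming (as in the upstream API) that only one of the two is set at a time and using placeholder schema, table, and connection-profile values:

```python
# Hypothetical source_config fragments; "source_connection_profile" would point at a
# real SQL Server connection profile, as in the stream.py examples below.
cdc_via_change_tables = {
    "source_connection_profile": "projects/p/locations/us-central1/connectionProfiles/source",
    "sql_server_source_config": {
        "include_objects": {"schemas": [{"schema": "dbo", "tables": [{"table": "orders"}]}]},
        "change_tables": {},       # read CDC events from SQL Server change tables
    },
}

cdc_via_transaction_logs = {
    "source_connection_profile": "projects/p/locations/us-central1/connectionProfiles/source",
    "sql_server_source_config": {
        "include_objects": {"schemas": [{"schema": "dbo", "tables": [{"table": "orders"}]}]},
        "transaction_logs": {},    # read CDC events directly from the transaction logs
    },
}
```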
+ """ + return pulumi.get(self, "change_tables") @property @pulumi.getter(name="excludeObjects") @@ -4484,6 +4506,20 @@ def max_concurrent_cdc_tasks(self) -> Optional[int]: """ return pulumi.get(self, "max_concurrent_cdc_tasks") + @property + @pulumi.getter(name="transactionLogs") + def transaction_logs(self) -> Optional['outputs.StreamSourceConfigSqlServerSourceConfigTransactionLogs']: + """ + CDC reader reads from transaction logs. + """ + return pulumi.get(self, "transaction_logs") + + +@pulumi.output_type +class StreamSourceConfigSqlServerSourceConfigChangeTables(dict): + def __init__(__self__): + pass + @pulumi.output_type class StreamSourceConfigSqlServerSourceConfigExcludeObjects(dict): @@ -4929,3 +4965,9 @@ def scale(self) -> Optional[int]: return pulumi.get(self, "scale") +@pulumi.output_type +class StreamSourceConfigSqlServerSourceConfigTransactionLogs(dict): + def __init__(__self__): + pass + + diff --git a/sdk/python/pulumi_gcp/datastream/stream.py b/sdk/python/pulumi_gcp/datastream/stream.py index 36928118ee..a57d5e13a4 100644 --- a/sdk/python/pulumi_gcp/datastream/stream.py +++ b/sdk/python/pulumi_gcp/datastream/stream.py @@ -47,7 +47,8 @@ def __init__(__self__, *, :param pulumi.Input[bool] create_without_validation: Create the stream without validating it. :param pulumi.Input[str] customer_managed_encryption_key: A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data will be encrypted using an internal Stream-specific encryption key provisioned through KMS. - :param pulumi.Input[str] desired_state: Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + :param pulumi.Input[str] desired_state: Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels. **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field 'effective_labels' for all of the labels present on the resource. """ @@ -186,7 +187,8 @@ def customer_managed_encryption_key(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="desiredState") def desired_state(self) -> Optional[pulumi.Input[str]]: """ - Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED """ return pulumi.get(self, "desired_state") @@ -243,7 +245,8 @@ def __init__(__self__, *, :param pulumi.Input[bool] create_without_validation: Create the stream without validating it. :param pulumi.Input[str] customer_managed_encryption_key: A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data will be encrypted using an internal Stream-specific encryption key provisioned through KMS. - :param pulumi.Input[str] desired_state: Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + :param pulumi.Input[str] desired_state: Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + values: NOT_STARTED, RUNNING, PAUSED. 
Default: NOT_STARTED :param pulumi.Input['StreamDestinationConfigArgs'] destination_config: Destination connection profile configuration. Structure is documented below. :param pulumi.Input[str] display_name: Display name. @@ -345,7 +348,8 @@ def customer_managed_encryption_key(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="desiredState") def desired_state(self) -> Optional[pulumi.Input[str]]: """ - Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED """ return pulumi.get(self, "desired_state") @@ -924,6 +928,96 @@ def __init__(__self__, }], }], }, + "transaction_logs": {}, + }, + }, + destination_config={ + "destination_connection_profile": destination.id, + "bigquery_destination_config": { + "data_freshness": "900s", + "source_hierarchy_datasets": { + "dataset_template": { + "location": "us-central1", + }, + }, + }, + }, + backfill_none={}) + ``` + ### Datastream Stream Sql Server Change Tables + + ```python + import pulumi + import pulumi_gcp as gcp + + instance = gcp.sql.DatabaseInstance("instance", + name="sql-server", + database_version="SQLSERVER_2019_STANDARD", + region="us-central1", + root_password="root-password", + deletion_protection=True, + settings={ + "tier": "db-custom-2-4096", + "ip_configuration": { + "authorized_networks": [ + { + "value": "34.71.242.81", + }, + { + "value": "34.72.28.29", + }, + { + "value": "34.67.6.157", + }, + { + "value": "34.67.234.134", + }, + { + "value": "34.72.239.218", + }, + ], + }, + }) + user = gcp.sql.User("user", + name="user", + instance=instance.name, + password="password") + db = gcp.sql.Database("db", + name="db", + instance=instance.name, + opts = pulumi.ResourceOptions(depends_on=[user])) + source = gcp.datastream.ConnectionProfile("source", + display_name="SQL Server Source", + location="us-central1", + connection_profile_id="source-profile", + sql_server_profile={ + "hostname": instance.public_ip_address, + "port": 1433, + "username": user.name, + "password": user.password, + "database": db.name, + }) + destination = gcp.datastream.ConnectionProfile("destination", + display_name="BigQuery Destination", + location="us-central1", + connection_profile_id="destination-profile", + bigquery_profile={}) + default = gcp.datastream.Stream("default", + display_name="SQL Server to BigQuery", + location="us-central1", + stream_id="stream", + source_config={ + "source_connection_profile": source.id, + "sql_server_source_config": { + "include_objects": { + "schemas": [{ + "schema": "schema", + "tables": [{ + "table": "table", + }], + }], + }, + "change_tables": {}, }, }, destination_config={ @@ -1234,7 +1328,8 @@ def __init__(__self__, :param pulumi.Input[bool] create_without_validation: Create the stream without validating it. :param pulumi.Input[str] customer_managed_encryption_key: A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data will be encrypted using an internal Stream-specific encryption key provisioned through KMS. - :param pulumi.Input[str] desired_state: Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + :param pulumi.Input[str] desired_state: Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. 
Possible + values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED :param pulumi.Input[Union['StreamDestinationConfigArgs', 'StreamDestinationConfigArgsDict']] destination_config: Destination connection profile configuration. Structure is documented below. :param pulumi.Input[str] display_name: Display name. @@ -1670,6 +1765,96 @@ def __init__(__self__, }], }], }, + "transaction_logs": {}, + }, + }, + destination_config={ + "destination_connection_profile": destination.id, + "bigquery_destination_config": { + "data_freshness": "900s", + "source_hierarchy_datasets": { + "dataset_template": { + "location": "us-central1", + }, + }, + }, + }, + backfill_none={}) + ``` + ### Datastream Stream Sql Server Change Tables + + ```python + import pulumi + import pulumi_gcp as gcp + + instance = gcp.sql.DatabaseInstance("instance", + name="sql-server", + database_version="SQLSERVER_2019_STANDARD", + region="us-central1", + root_password="root-password", + deletion_protection=True, + settings={ + "tier": "db-custom-2-4096", + "ip_configuration": { + "authorized_networks": [ + { + "value": "34.71.242.81", + }, + { + "value": "34.72.28.29", + }, + { + "value": "34.67.6.157", + }, + { + "value": "34.67.234.134", + }, + { + "value": "34.72.239.218", + }, + ], + }, + }) + user = gcp.sql.User("user", + name="user", + instance=instance.name, + password="password") + db = gcp.sql.Database("db", + name="db", + instance=instance.name, + opts = pulumi.ResourceOptions(depends_on=[user])) + source = gcp.datastream.ConnectionProfile("source", + display_name="SQL Server Source", + location="us-central1", + connection_profile_id="source-profile", + sql_server_profile={ + "hostname": instance.public_ip_address, + "port": 1433, + "username": user.name, + "password": user.password, + "database": db.name, + }) + destination = gcp.datastream.ConnectionProfile("destination", + display_name="BigQuery Destination", + location="us-central1", + connection_profile_id="destination-profile", + bigquery_profile={}) + default = gcp.datastream.Stream("default", + display_name="SQL Server to BigQuery", + location="us-central1", + stream_id="stream", + source_config={ + "source_connection_profile": source.id, + "sql_server_source_config": { + "include_objects": { + "schemas": [{ + "schema": "schema", + "tables": [{ + "table": "table", + }], + }], + }, + "change_tables": {}, }, }, destination_config={ @@ -2075,7 +2260,8 @@ def get(resource_name: str, :param pulumi.Input[bool] create_without_validation: Create the stream without validating it. :param pulumi.Input[str] customer_managed_encryption_key: A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data will be encrypted using an internal Stream-specific encryption key provisioned through KMS. - :param pulumi.Input[str] desired_state: Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + :param pulumi.Input[str] desired_state: Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED :param pulumi.Input[Union['StreamDestinationConfigArgs', 'StreamDestinationConfigArgsDict']] destination_config: Destination connection profile configuration. Structure is documented below. :param pulumi.Input[str] display_name: Display name. 
@@ -2150,7 +2336,8 @@ def customer_managed_encryption_key(self) -> pulumi.Output[Optional[str]]: @pulumi.getter(name="desiredState") def desired_state(self) -> pulumi.Output[Optional[str]]: """ - Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. + Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream. Possible + values: NOT_STARTED, RUNNING, PAUSED. Default: NOT_STARTED """ return pulumi.get(self, "desired_state") diff --git a/sdk/python/pulumi_gcp/discoveryengine/_inputs.py b/sdk/python/pulumi_gcp/discoveryengine/_inputs.py index 5d09706869..4a3e40f491 100644 --- a/sdk/python/pulumi_gcp/discoveryengine/_inputs.py +++ b/sdk/python/pulumi_gcp/discoveryengine/_inputs.py @@ -25,16 +25,24 @@ 'ChatEngineCommonConfigArgsDict', 'DataStoreDocumentProcessingConfigArgs', 'DataStoreDocumentProcessingConfigArgsDict', + 'DataStoreDocumentProcessingConfigChunkingConfigArgs', + 'DataStoreDocumentProcessingConfigChunkingConfigArgsDict', + 'DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs', + 'DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgsDict', 'DataStoreDocumentProcessingConfigDefaultParsingConfigArgs', 'DataStoreDocumentProcessingConfigDefaultParsingConfigArgsDict', 'DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgs', 'DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgsDict', + 'DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs', + 'DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgsDict', 'DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgs', 'DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgsDict', 'DataStoreDocumentProcessingConfigParsingConfigOverrideArgs', 'DataStoreDocumentProcessingConfigParsingConfigOverrideArgsDict', 'DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigArgs', 'DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigArgsDict', + 'DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs', + 'DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgsDict', 'DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgs', 'DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgsDict', 'SearchEngineCommonConfigArgs', @@ -244,6 +252,11 @@ def company_name(self, value: Optional[pulumi.Input[str]]): if not MYPY: class DataStoreDocumentProcessingConfigArgsDict(TypedDict): + chunking_config: NotRequired[pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigArgsDict']] + """ + Whether chunking mode is enabled. + Structure is documented below. + """ default_parsing_config: NotRequired[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigArgsDict']] """ Configurations for default Document parser. 
If not specified, this resource @@ -267,10 +280,13 @@ class DataStoreDocumentProcessingConfigArgsDict(TypedDict): @pulumi.input_type class DataStoreDocumentProcessingConfigArgs: def __init__(__self__, *, + chunking_config: Optional[pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigArgs']] = None, default_parsing_config: Optional[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigArgs']] = None, name: Optional[pulumi.Input[str]] = None, parsing_config_overrides: Optional[pulumi.Input[Sequence[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideArgs']]]] = None): """ + :param pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigArgs'] chunking_config: Whether chunking mode is enabled. + Structure is documented below. :param pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigArgs'] default_parsing_config: Configurations for default Document parser. If not specified, this resource will be configured to use a default DigitalParsingConfig, and the default parsing config will be applied to all file types for Document parsing. @@ -280,6 +296,8 @@ def __init__(__self__, *, `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/documentProcessingConfig`. :param pulumi.Input[Sequence[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideArgs']]] parsing_config_overrides: Map from file type to override the default parsing configuration based on the file type. Supported keys: """ + if chunking_config is not None: + pulumi.set(__self__, "chunking_config", chunking_config) if default_parsing_config is not None: pulumi.set(__self__, "default_parsing_config", default_parsing_config) if name is not None: @@ -287,6 +305,19 @@ def __init__(__self__, *, if parsing_config_overrides is not None: pulumi.set(__self__, "parsing_config_overrides", parsing_config_overrides) + @property + @pulumi.getter(name="chunkingConfig") + def chunking_config(self) -> Optional[pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigArgs']]: + """ + Whether chunking mode is enabled. + Structure is documented below. + """ + return pulumi.get(self, "chunking_config") + + @chunking_config.setter + def chunking_config(self, value: Optional[pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigArgs']]): + pulumi.set(self, "chunking_config", value) + @property @pulumi.getter(name="defaultParsingConfig") def default_parsing_config(self) -> Optional[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigArgs']]: @@ -329,12 +360,109 @@ def parsing_config_overrides(self, value: Optional[pulumi.Input[Sequence[pulumi. pulumi.set(self, "parsing_config_overrides", value) +if not MYPY: + class DataStoreDocumentProcessingConfigChunkingConfigArgsDict(TypedDict): + layout_based_chunking_config: NotRequired[pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgsDict']] + """ + Configuration for the layout based chunking. + Structure is documented below. 
+ """ +elif False: + DataStoreDocumentProcessingConfigChunkingConfigArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class DataStoreDocumentProcessingConfigChunkingConfigArgs: + def __init__(__self__, *, + layout_based_chunking_config: Optional[pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs']] = None): + """ + :param pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs'] layout_based_chunking_config: Configuration for the layout based chunking. + Structure is documented below. + """ + if layout_based_chunking_config is not None: + pulumi.set(__self__, "layout_based_chunking_config", layout_based_chunking_config) + + @property + @pulumi.getter(name="layoutBasedChunkingConfig") + def layout_based_chunking_config(self) -> Optional[pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs']]: + """ + Configuration for the layout based chunking. + Structure is documented below. + """ + return pulumi.get(self, "layout_based_chunking_config") + + @layout_based_chunking_config.setter + def layout_based_chunking_config(self, value: Optional[pulumi.Input['DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs']]): + pulumi.set(self, "layout_based_chunking_config", value) + + +if not MYPY: + class DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgsDict(TypedDict): + chunk_size: NotRequired[pulumi.Input[int]] + """ + The token size limit for each chunk. + Supported values: 100-500 (inclusive). Default value: 500. + """ + include_ancestor_headings: NotRequired[pulumi.Input[bool]] + """ + Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + Default value: False. + """ +elif False: + DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs: + def __init__(__self__, *, + chunk_size: Optional[pulumi.Input[int]] = None, + include_ancestor_headings: Optional[pulumi.Input[bool]] = None): + """ + :param pulumi.Input[int] chunk_size: The token size limit for each chunk. + Supported values: 100-500 (inclusive). Default value: 500. + :param pulumi.Input[bool] include_ancestor_headings: Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + Default value: False. + """ + if chunk_size is not None: + pulumi.set(__self__, "chunk_size", chunk_size) + if include_ancestor_headings is not None: + pulumi.set(__self__, "include_ancestor_headings", include_ancestor_headings) + + @property + @pulumi.getter(name="chunkSize") + def chunk_size(self) -> Optional[pulumi.Input[int]]: + """ + The token size limit for each chunk. + Supported values: 100-500 (inclusive). Default value: 500. + """ + return pulumi.get(self, "chunk_size") + + @chunk_size.setter + def chunk_size(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "chunk_size", value) + + @property + @pulumi.getter(name="includeAncestorHeadings") + def include_ancestor_headings(self) -> Optional[pulumi.Input[bool]]: + """ + Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + Default value: False. 
+ """ + return pulumi.get(self, "include_ancestor_headings") + + @include_ancestor_headings.setter + def include_ancestor_headings(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "include_ancestor_headings", value) + + if not MYPY: class DataStoreDocumentProcessingConfigDefaultParsingConfigArgsDict(TypedDict): digital_parsing_config: NotRequired[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgsDict']] """ Configurations applied to digital parser. """ + layout_parsing_config: NotRequired[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgsDict']] + """ + Configurations applied to layout parser. + """ ocr_parsing_config: NotRequired[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgsDict']] """ Configurations applied to OCR parser. Currently it only applies to PDFs. @@ -347,14 +475,18 @@ class DataStoreDocumentProcessingConfigDefaultParsingConfigArgsDict(TypedDict): class DataStoreDocumentProcessingConfigDefaultParsingConfigArgs: def __init__(__self__, *, digital_parsing_config: Optional[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgs']] = None, + layout_parsing_config: Optional[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs']] = None, ocr_parsing_config: Optional[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgs']] = None): """ :param pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgs'] digital_parsing_config: Configurations applied to digital parser. + :param pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs'] layout_parsing_config: Configurations applied to layout parser. :param pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgs'] ocr_parsing_config: Configurations applied to OCR parser. Currently it only applies to PDFs. Structure is documented below. """ if digital_parsing_config is not None: pulumi.set(__self__, "digital_parsing_config", digital_parsing_config) + if layout_parsing_config is not None: + pulumi.set(__self__, "layout_parsing_config", layout_parsing_config) if ocr_parsing_config is not None: pulumi.set(__self__, "ocr_parsing_config", ocr_parsing_config) @@ -370,6 +502,18 @@ def digital_parsing_config(self) -> Optional[pulumi.Input['DataStoreDocumentProc def digital_parsing_config(self, value: Optional[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgs']]): pulumi.set(self, "digital_parsing_config", value) + @property + @pulumi.getter(name="layoutParsingConfig") + def layout_parsing_config(self) -> Optional[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs']]: + """ + Configurations applied to layout parser. 
+ """ + return pulumi.get(self, "layout_parsing_config") + + @layout_parsing_config.setter + def layout_parsing_config(self, value: Optional[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs']]): + pulumi.set(self, "layout_parsing_config", value) + @property @pulumi.getter(name="ocrParsingConfig") def ocr_parsing_config(self) -> Optional[pulumi.Input['DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgs']]: @@ -396,6 +540,18 @@ def __init__(__self__): pass +if not MYPY: + class DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgsDict(TypedDict): + pass +elif False: + DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs: + def __init__(__self__): + pass + + if not MYPY: class DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgsDict(TypedDict): use_native_text: NotRequired[pulumi.Input[bool]] @@ -438,6 +594,10 @@ class DataStoreDocumentProcessingConfigParsingConfigOverrideArgsDict(TypedDict): """ Configurations applied to digital parser. """ + layout_parsing_config: NotRequired[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgsDict']] + """ + Configurations applied to layout parser. + """ ocr_parsing_config: NotRequired[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgsDict']] """ Configurations applied to OCR parser. Currently it only applies to PDFs. @@ -451,16 +611,20 @@ class DataStoreDocumentProcessingConfigParsingConfigOverrideArgs: def __init__(__self__, *, file_type: pulumi.Input[str], digital_parsing_config: Optional[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigArgs']] = None, + layout_parsing_config: Optional[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs']] = None, ocr_parsing_config: Optional[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgs']] = None): """ :param pulumi.Input[str] file_type: The identifier for this object. Format specified above. :param pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigArgs'] digital_parsing_config: Configurations applied to digital parser. + :param pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs'] layout_parsing_config: Configurations applied to layout parser. :param pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgs'] ocr_parsing_config: Configurations applied to OCR parser. Currently it only applies to PDFs. Structure is documented below. 
""" pulumi.set(__self__, "file_type", file_type) if digital_parsing_config is not None: pulumi.set(__self__, "digital_parsing_config", digital_parsing_config) + if layout_parsing_config is not None: + pulumi.set(__self__, "layout_parsing_config", layout_parsing_config) if ocr_parsing_config is not None: pulumi.set(__self__, "ocr_parsing_config", ocr_parsing_config) @@ -488,6 +652,18 @@ def digital_parsing_config(self) -> Optional[pulumi.Input['DataStoreDocumentProc def digital_parsing_config(self, value: Optional[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigArgs']]): pulumi.set(self, "digital_parsing_config", value) + @property + @pulumi.getter(name="layoutParsingConfig") + def layout_parsing_config(self) -> Optional[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs']]: + """ + Configurations applied to layout parser. + """ + return pulumi.get(self, "layout_parsing_config") + + @layout_parsing_config.setter + def layout_parsing_config(self, value: Optional[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs']]): + pulumi.set(self, "layout_parsing_config", value) + @property @pulumi.getter(name="ocrParsingConfig") def ocr_parsing_config(self) -> Optional[pulumi.Input['DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgs']]: @@ -514,6 +690,18 @@ def __init__(__self__): pass +if not MYPY: + class DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgsDict(TypedDict): + pass +elif False: + DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs: + def __init__(__self__): + pass + + if not MYPY: class DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgsDict(TypedDict): use_native_text: NotRequired[pulumi.Input[bool]] diff --git a/sdk/python/pulumi_gcp/discoveryengine/data_store.py b/sdk/python/pulumi_gcp/discoveryengine/data_store.py index 803c0fce37..bb73aa27a4 100644 --- a/sdk/python/pulumi_gcp/discoveryengine/data_store.py +++ b/sdk/python/pulumi_gcp/discoveryengine/data_store.py @@ -42,7 +42,7 @@ def __init__(__self__, *, :param pulumi.Input[str] display_name: The display name of the data store. This field must be a UTF-8 encoded string with a length limit of 128 characters. :param pulumi.Input[str] industry_vertical: The industry vertical that the data store registers. - Possible values are: `GENERIC`, `MEDIA`. + Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. :param pulumi.Input[str] location: The geographic location where the data store should reside. The value can only be one of "global", "us" and "eu". :param pulumi.Input[bool] create_advanced_site_search: If true, an advanced data store for site search will be created. If the @@ -60,7 +60,7 @@ def __init__(__self__, *, This flag cannot be specified if `data_store.starting_schema` is specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] solution_types: The solutions that the data store enrolls. - Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. 
""" pulumi.set(__self__, "content_config", content_config) pulumi.set(__self__, "data_store_id", data_store_id) @@ -124,7 +124,7 @@ def display_name(self, value: pulumi.Input[str]): def industry_vertical(self) -> pulumi.Input[str]: """ The industry vertical that the data store registers. - Possible values are: `GENERIC`, `MEDIA`. + Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. """ return pulumi.get(self, "industry_vertical") @@ -208,7 +208,7 @@ def skip_default_schema_creation(self, value: Optional[pulumi.Input[bool]]): def solution_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The solutions that the data store enrolls. - Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. """ return pulumi.get(self, "solution_types") @@ -251,7 +251,7 @@ def __init__(__self__, *, :param pulumi.Input['DataStoreDocumentProcessingConfigArgs'] document_processing_config: Configuration for Document understanding and enrichment. Structure is documented below. :param pulumi.Input[str] industry_vertical: The industry vertical that the data store registers. - Possible values are: `GENERIC`, `MEDIA`. + Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. :param pulumi.Input[str] location: The geographic location where the data store should reside. The value can only be one of "global", "us" and "eu". :param pulumi.Input[str] name: The unique full resource name of the data store. Values are of the format @@ -268,7 +268,7 @@ def __init__(__self__, *, This flag cannot be specified if `data_store.starting_schema` is specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] solution_types: The solutions that the data store enrolls. - Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. """ if content_config is not None: pulumi.set(__self__, "content_config", content_config) @@ -394,7 +394,7 @@ def document_processing_config(self, value: Optional[pulumi.Input['DataStoreDocu def industry_vertical(self) -> Optional[pulumi.Input[str]]: """ The industry vertical that the data store registers. - Possible values are: `GENERIC`, `MEDIA`. + Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. """ return pulumi.get(self, "industry_vertical") @@ -466,7 +466,7 @@ def skip_default_schema_creation(self, value: Optional[pulumi.Input[bool]]): def solution_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ The solutions that the data store enrolls. - Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. """ return pulumi.get(self, "solution_types") @@ -587,7 +587,7 @@ def __init__(__self__, :param pulumi.Input[Union['DataStoreDocumentProcessingConfigArgs', 'DataStoreDocumentProcessingConfigArgsDict']] document_processing_config: Configuration for Document understanding and enrichment. Structure is documented below. :param pulumi.Input[str] industry_vertical: The industry vertical that the data store registers. - Possible values are: `GENERIC`, `MEDIA`. 
+ Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. :param pulumi.Input[str] location: The geographic location where the data store should reside. The value can only be one of "global", "us" and "eu". :param pulumi.Input[str] project: The ID of the project in which the resource belongs. @@ -600,7 +600,7 @@ def __init__(__self__, This flag cannot be specified if `data_store.starting_schema` is specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] solution_types: The solutions that the data store enrolls. - Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. """ ... @overload @@ -791,7 +791,7 @@ def get(resource_name: str, :param pulumi.Input[Union['DataStoreDocumentProcessingConfigArgs', 'DataStoreDocumentProcessingConfigArgsDict']] document_processing_config: Configuration for Document understanding and enrichment. Structure is documented below. :param pulumi.Input[str] industry_vertical: The industry vertical that the data store registers. - Possible values are: `GENERIC`, `MEDIA`. + Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. :param pulumi.Input[str] location: The geographic location where the data store should reside. The value can only be one of "global", "us" and "eu". :param pulumi.Input[str] name: The unique full resource name of the data store. Values are of the format @@ -808,7 +808,7 @@ def get(resource_name: str, This flag cannot be specified if `data_store.starting_schema` is specified. :param pulumi.Input[Sequence[pulumi.Input[str]]] solution_types: The solutions that the data store enrolls. - Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -898,7 +898,7 @@ def document_processing_config(self) -> pulumi.Output[Optional['outputs.DataStor def industry_vertical(self) -> pulumi.Output[str]: """ The industry vertical that the data store registers. - Possible values are: `GENERIC`, `MEDIA`. + Possible values are: `GENERIC`, `MEDIA`, `HEALTHCARE_FHIR`. """ return pulumi.get(self, "industry_vertical") @@ -950,7 +950,7 @@ def skip_default_schema_creation(self) -> pulumi.Output[Optional[bool]]: def solution_types(self) -> pulumi.Output[Optional[Sequence[str]]]: """ The solutions that the data store enrolls. - Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`. + Each value may be one of: `SOLUTION_TYPE_RECOMMENDATION`, `SOLUTION_TYPE_SEARCH`, `SOLUTION_TYPE_CHAT`, `SOLUTION_TYPE_GENERATIVE_CHAT`. 
""" return pulumi.get(self, "solution_types") diff --git a/sdk/python/pulumi_gcp/discoveryengine/outputs.py b/sdk/python/pulumi_gcp/discoveryengine/outputs.py index d321175825..10246056b2 100644 --- a/sdk/python/pulumi_gcp/discoveryengine/outputs.py +++ b/sdk/python/pulumi_gcp/discoveryengine/outputs.py @@ -21,11 +21,15 @@ 'ChatEngineChatEngineMetadata', 'ChatEngineCommonConfig', 'DataStoreDocumentProcessingConfig', + 'DataStoreDocumentProcessingConfigChunkingConfig', + 'DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig', 'DataStoreDocumentProcessingConfigDefaultParsingConfig', 'DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig', + 'DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig', 'DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig', 'DataStoreDocumentProcessingConfigParsingConfigOverride', 'DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig', + 'DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig', 'DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig', 'SearchEngineCommonConfig', 'SearchEngineSearchEngineConfig', @@ -223,7 +227,9 @@ class DataStoreDocumentProcessingConfig(dict): @staticmethod def __key_warning(key: str): suggest = None - if key == "defaultParsingConfig": + if key == "chunkingConfig": + suggest = "chunking_config" + elif key == "defaultParsingConfig": suggest = "default_parsing_config" elif key == "parsingConfigOverrides": suggest = "parsing_config_overrides" @@ -240,10 +246,13 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, + chunking_config: Optional['outputs.DataStoreDocumentProcessingConfigChunkingConfig'] = None, default_parsing_config: Optional['outputs.DataStoreDocumentProcessingConfigDefaultParsingConfig'] = None, name: Optional[str] = None, parsing_config_overrides: Optional[Sequence['outputs.DataStoreDocumentProcessingConfigParsingConfigOverride']] = None): """ + :param 'DataStoreDocumentProcessingConfigChunkingConfigArgs' chunking_config: Whether chunking mode is enabled. + Structure is documented below. :param 'DataStoreDocumentProcessingConfigDefaultParsingConfigArgs' default_parsing_config: Configurations for default Document parser. If not specified, this resource will be configured to use a default DigitalParsingConfig, and the default parsing config will be applied to all file types for Document parsing. @@ -253,6 +262,8 @@ def __init__(__self__, *, `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/documentProcessingConfig`. :param Sequence['DataStoreDocumentProcessingConfigParsingConfigOverrideArgs'] parsing_config_overrides: Map from file type to override the default parsing configuration based on the file type. Supported keys: """ + if chunking_config is not None: + pulumi.set(__self__, "chunking_config", chunking_config) if default_parsing_config is not None: pulumi.set(__self__, "default_parsing_config", default_parsing_config) if name is not None: @@ -260,6 +271,15 @@ def __init__(__self__, *, if parsing_config_overrides is not None: pulumi.set(__self__, "parsing_config_overrides", parsing_config_overrides) + @property + @pulumi.getter(name="chunkingConfig") + def chunking_config(self) -> Optional['outputs.DataStoreDocumentProcessingConfigChunkingConfig']: + """ + Whether chunking mode is enabled. + Structure is documented below. 
+ """ + return pulumi.get(self, "chunking_config") + @property @pulumi.getter(name="defaultParsingConfig") def default_parsing_config(self) -> Optional['outputs.DataStoreDocumentProcessingConfigDefaultParsingConfig']: @@ -290,6 +310,98 @@ def parsing_config_overrides(self) -> Optional[Sequence['outputs.DataStoreDocume return pulumi.get(self, "parsing_config_overrides") +@pulumi.output_type +class DataStoreDocumentProcessingConfigChunkingConfig(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "layoutBasedChunkingConfig": + suggest = "layout_based_chunking_config" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in DataStoreDocumentProcessingConfigChunkingConfig. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + DataStoreDocumentProcessingConfigChunkingConfig.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + DataStoreDocumentProcessingConfigChunkingConfig.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + layout_based_chunking_config: Optional['outputs.DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig'] = None): + """ + :param 'DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfigArgs' layout_based_chunking_config: Configuration for the layout based chunking. + Structure is documented below. + """ + if layout_based_chunking_config is not None: + pulumi.set(__self__, "layout_based_chunking_config", layout_based_chunking_config) + + @property + @pulumi.getter(name="layoutBasedChunkingConfig") + def layout_based_chunking_config(self) -> Optional['outputs.DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig']: + """ + Configuration for the layout based chunking. + Structure is documented below. + """ + return pulumi.get(self, "layout_based_chunking_config") + + +@pulumi.output_type +class DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "chunkSize": + suggest = "chunk_size" + elif key == "includeAncestorHeadings": + suggest = "include_ancestor_headings" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + DataStoreDocumentProcessingConfigChunkingConfigLayoutBasedChunkingConfig.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + chunk_size: Optional[int] = None, + include_ancestor_headings: Optional[bool] = None): + """ + :param int chunk_size: The token size limit for each chunk. + Supported values: 100-500 (inclusive). Default value: 500. + :param bool include_ancestor_headings: Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + Default value: False. 
+ """ + if chunk_size is not None: + pulumi.set(__self__, "chunk_size", chunk_size) + if include_ancestor_headings is not None: + pulumi.set(__self__, "include_ancestor_headings", include_ancestor_headings) + + @property + @pulumi.getter(name="chunkSize") + def chunk_size(self) -> Optional[int]: + """ + The token size limit for each chunk. + Supported values: 100-500 (inclusive). Default value: 500. + """ + return pulumi.get(self, "chunk_size") + + @property + @pulumi.getter(name="includeAncestorHeadings") + def include_ancestor_headings(self) -> Optional[bool]: + """ + Whether to include appending different levels of headings to chunks from the middle of the document to prevent context loss. + Default value: False. + """ + return pulumi.get(self, "include_ancestor_headings") + + @pulumi.output_type class DataStoreDocumentProcessingConfigDefaultParsingConfig(dict): @staticmethod @@ -297,6 +409,8 @@ def __key_warning(key: str): suggest = None if key == "digitalParsingConfig": suggest = "digital_parsing_config" + elif key == "layoutParsingConfig": + suggest = "layout_parsing_config" elif key == "ocrParsingConfig": suggest = "ocr_parsing_config" @@ -313,14 +427,18 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, digital_parsing_config: Optional['outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfig'] = None, + layout_parsing_config: Optional['outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig'] = None, ocr_parsing_config: Optional['outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig'] = None): """ :param 'DataStoreDocumentProcessingConfigDefaultParsingConfigDigitalParsingConfigArgs' digital_parsing_config: Configurations applied to digital parser. + :param 'DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfigArgs' layout_parsing_config: Configurations applied to layout parser. :param 'DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfigArgs' ocr_parsing_config: Configurations applied to OCR parser. Currently it only applies to PDFs. Structure is documented below. """ if digital_parsing_config is not None: pulumi.set(__self__, "digital_parsing_config", digital_parsing_config) + if layout_parsing_config is not None: + pulumi.set(__self__, "layout_parsing_config", layout_parsing_config) if ocr_parsing_config is not None: pulumi.set(__self__, "ocr_parsing_config", ocr_parsing_config) @@ -332,6 +450,14 @@ def digital_parsing_config(self) -> Optional['outputs.DataStoreDocumentProcessin """ return pulumi.get(self, "digital_parsing_config") + @property + @pulumi.getter(name="layoutParsingConfig") + def layout_parsing_config(self) -> Optional['outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig']: + """ + Configurations applied to layout parser. 
+ """ + return pulumi.get(self, "layout_parsing_config") + @property @pulumi.getter(name="ocrParsingConfig") def ocr_parsing_config(self) -> Optional['outputs.DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig']: @@ -348,6 +474,12 @@ def __init__(__self__): pass +@pulumi.output_type +class DataStoreDocumentProcessingConfigDefaultParsingConfigLayoutParsingConfig(dict): + def __init__(__self__): + pass + + @pulumi.output_type class DataStoreDocumentProcessingConfigDefaultParsingConfigOcrParsingConfig(dict): @staticmethod @@ -393,6 +525,8 @@ def __key_warning(key: str): suggest = "file_type" elif key == "digitalParsingConfig": suggest = "digital_parsing_config" + elif key == "layoutParsingConfig": + suggest = "layout_parsing_config" elif key == "ocrParsingConfig": suggest = "ocr_parsing_config" @@ -410,16 +544,20 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, file_type: str, digital_parsing_config: Optional['outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfig'] = None, + layout_parsing_config: Optional['outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig'] = None, ocr_parsing_config: Optional['outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig'] = None): """ :param str file_type: The identifier for this object. Format specified above. :param 'DataStoreDocumentProcessingConfigParsingConfigOverrideDigitalParsingConfigArgs' digital_parsing_config: Configurations applied to digital parser. + :param 'DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfigArgs' layout_parsing_config: Configurations applied to layout parser. :param 'DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfigArgs' ocr_parsing_config: Configurations applied to OCR parser. Currently it only applies to PDFs. Structure is documented below. """ pulumi.set(__self__, "file_type", file_type) if digital_parsing_config is not None: pulumi.set(__self__, "digital_parsing_config", digital_parsing_config) + if layout_parsing_config is not None: + pulumi.set(__self__, "layout_parsing_config", layout_parsing_config) if ocr_parsing_config is not None: pulumi.set(__self__, "ocr_parsing_config", ocr_parsing_config) @@ -439,6 +577,14 @@ def digital_parsing_config(self) -> Optional['outputs.DataStoreDocumentProcessin """ return pulumi.get(self, "digital_parsing_config") + @property + @pulumi.getter(name="layoutParsingConfig") + def layout_parsing_config(self) -> Optional['outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig']: + """ + Configurations applied to layout parser. 
+ """ + return pulumi.get(self, "layout_parsing_config") + @property @pulumi.getter(name="ocrParsingConfig") def ocr_parsing_config(self) -> Optional['outputs.DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig']: @@ -455,6 +601,12 @@ def __init__(__self__): pass +@pulumi.output_type +class DataStoreDocumentProcessingConfigParsingConfigOverrideLayoutParsingConfig(dict): + def __init__(__self__): + pass + + @pulumi.output_type class DataStoreDocumentProcessingConfigParsingConfigOverrideOcrParsingConfig(dict): @staticmethod diff --git a/sdk/python/pulumi_gcp/firebase/database_instance.py b/sdk/python/pulumi_gcp/firebase/database_instance.py index 1ff9ea6d43..4db3b8912d 100644 --- a/sdk/python/pulumi_gcp/firebase/database_instance.py +++ b/sdk/python/pulumi_gcp/firebase/database_instance.py @@ -33,7 +33,7 @@ def __init__(__self__, *, - - - :param pulumi.Input[str] region: A reference to the region where the Firebase Realtime database resides. Check all [available regions](https://firebase.google.com/docs/projects/locations#rtdb-locations) - :param pulumi.Input[str] desired_state: The intended database state. + :param pulumi.Input[str] desired_state: The intended database state. Possible values: ACTIVE, DISABLED. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] type: The database type. @@ -85,7 +85,7 @@ def region(self, value: pulumi.Input[str]): @pulumi.getter(name="desiredState") def desired_state(self) -> Optional[pulumi.Input[str]]: """ - The intended database state. + The intended database state. Possible values: ACTIVE, DISABLED. """ return pulumi.get(self, "desired_state") @@ -139,7 +139,7 @@ def __init__(__self__, *, Input properties used for looking up and filtering DatabaseInstance resources. :param pulumi.Input[str] database_url: The database URL in the form of https://{instance-id}.firebaseio.com for us-central1 instances or https://{instance-id}.{region}.firebasedatabase.app in other regions. - :param pulumi.Input[str] desired_state: The intended database state. + :param pulumi.Input[str] desired_state: The intended database state. Possible values: ACTIVE, DISABLED. :param pulumi.Input[str] instance_id: The globally unique identifier of the Firebase Realtime Database instance. Instance IDs cannot be reused after deletion. @@ -195,7 +195,7 @@ def database_url(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="desiredState") def desired_state(self) -> Optional[pulumi.Input[str]]: """ - The intended database state. + The intended database state. Possible values: ACTIVE, DISABLED. """ return pulumi.get(self, "desired_state") @@ -386,7 +386,7 @@ def __init__(__self__, :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. - :param pulumi.Input[str] desired_state: The intended database state. + :param pulumi.Input[str] desired_state: The intended database state. Possible values: ACTIVE, DISABLED. :param pulumi.Input[str] instance_id: The globally unique identifier of the Firebase Realtime Database instance. Instance IDs cannot be reused after deletion. @@ -560,7 +560,7 @@ def get(resource_name: str, :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] database_url: The database URL in the form of https://{instance-id}.firebaseio.com for us-central1 instances or https://{instance-id}.{region}.firebasedatabase.app in other regions. 
- :param pulumi.Input[str] desired_state: The intended database state. + :param pulumi.Input[str] desired_state: The intended database state. Possible values: ACTIVE, DISABLED. :param pulumi.Input[str] instance_id: The globally unique identifier of the Firebase Realtime Database instance. Instance IDs cannot be reused after deletion. @@ -609,7 +609,7 @@ def database_url(self) -> pulumi.Output[str]: @pulumi.getter(name="desiredState") def desired_state(self) -> pulumi.Output[Optional[str]]: """ - The intended database state. + The intended database state. Possible values: ACTIVE, DISABLED. """ return pulumi.get(self, "desired_state") diff --git a/sdk/python/pulumi_gcp/gkehub/_inputs.py b/sdk/python/pulumi_gcp/gkehub/_inputs.py index 449bb8327a..529de004fd 100644 --- a/sdk/python/pulumi_gcp/gkehub/_inputs.py +++ b/sdk/python/pulumi_gcp/gkehub/_inputs.py @@ -1717,7 +1717,9 @@ def description(self, value: Optional[pulumi.Input[str]]): class FeatureMembershipConfigmanagementArgsDict(TypedDict): binauthz: NotRequired[pulumi.Input['FeatureMembershipConfigmanagementBinauthzArgsDict']] """ + (Optional, Deprecated) Binauthz configuration for the cluster. Structure is documented below. + This field will be ignored and should not be set. """ config_sync: NotRequired[pulumi.Input['FeatureMembershipConfigmanagementConfigSyncArgsDict']] """ @@ -1726,6 +1728,10 @@ class FeatureMembershipConfigmanagementArgsDict(TypedDict): hierarchy_controller: NotRequired[pulumi.Input['FeatureMembershipConfigmanagementHierarchyControllerArgsDict']] """ Hierarchy Controller configuration for the cluster. Structure is documented below. + Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + to migrate from Hierarchy Controller to HNC. """ management: NotRequired[pulumi.Input[str]] """ @@ -1734,6 +1740,8 @@ class FeatureMembershipConfigmanagementArgsDict(TypedDict): policy_controller: NotRequired[pulumi.Input['FeatureMembershipConfigmanagementPolicyControllerArgsDict']] """ Policy Controller configuration for the cluster. Structure is documented below. + Configuring Policy Controller through the configmanagement feature is no longer recommended. + Use the policycontroller feature instead. """ version: NotRequired[pulumi.Input[str]] """ @@ -1752,11 +1760,19 @@ def __init__(__self__, *, policy_controller: Optional[pulumi.Input['FeatureMembershipConfigmanagementPolicyControllerArgs']] = None, version: Optional[pulumi.Input[str]] = None): """ - :param pulumi.Input['FeatureMembershipConfigmanagementBinauthzArgs'] binauthz: Binauthz configuration for the cluster. Structure is documented below. + :param pulumi.Input['FeatureMembershipConfigmanagementBinauthzArgs'] binauthz: (Optional, Deprecated) + Binauthz configuration for the cluster. Structure is documented below. + This field will be ignored and should not be set. :param pulumi.Input['FeatureMembershipConfigmanagementConfigSyncArgs'] config_sync: Config Sync configuration for the cluster. Structure is documented below. :param pulumi.Input['FeatureMembershipConfigmanagementHierarchyControllerArgs'] hierarchy_controller: Hierarchy Controller configuration for the cluster. Structure is documented below. 
+ Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + to migrate from Hierarchy Controller to HNC. :param pulumi.Input[str] management: Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. :param pulumi.Input['FeatureMembershipConfigmanagementPolicyControllerArgs'] policy_controller: Policy Controller configuration for the cluster. Structure is documented below. + Configuring Policy Controller through the configmanagement feature is no longer recommended. + Use the policycontroller feature instead. :param pulumi.Input[str] version: Version of ACM installed. """ if binauthz is not None: @@ -1776,7 +1792,9 @@ def __init__(__self__, *, @pulumi.getter def binauthz(self) -> Optional[pulumi.Input['FeatureMembershipConfigmanagementBinauthzArgs']]: """ + (Optional, Deprecated) Binauthz configuration for the cluster. Structure is documented below. + This field will be ignored and should not be set. """ return pulumi.get(self, "binauthz") @@ -1801,6 +1819,10 @@ def config_sync(self, value: Optional[pulumi.Input['FeatureMembershipConfigmanag def hierarchy_controller(self) -> Optional[pulumi.Input['FeatureMembershipConfigmanagementHierarchyControllerArgs']]: """ Hierarchy Controller configuration for the cluster. Structure is documented below. + Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + to migrate from Hierarchy Controller to HNC. """ return pulumi.get(self, "hierarchy_controller") @@ -1825,6 +1847,8 @@ def management(self, value: Optional[pulumi.Input[str]]): def policy_controller(self) -> Optional[pulumi.Input['FeatureMembershipConfigmanagementPolicyControllerArgs']]: """ Policy Controller configuration for the cluster. Structure is documented below. + Configuring Policy Controller through the configmanagement feature is no longer recommended. + Use the policycontroller feature instead. 
""" return pulumi.get(self, "policy_controller") diff --git a/sdk/python/pulumi_gcp/gkehub/feature_membership.py b/sdk/python/pulumi_gcp/gkehub/feature_membership.py index 7d05947471..00451e936b 100644 --- a/sdk/python/pulumi_gcp/gkehub/feature_membership.py +++ b/sdk/python/pulumi_gcp/gkehub/feature_membership.py @@ -334,8 +334,9 @@ def __init__(__self__, feature=feature.name, membership=membership.membership_id, configmanagement={ - "version": "1.6.2", + "version": "1.19.0", "config_sync": { + "enabled": True, "git": { "sync_repo": "https://github.com/hashicorp/terraform", }, @@ -370,8 +371,9 @@ def __init__(__self__, feature=feature.name, membership=membership.membership_id, configmanagement={ - "version": "1.15.1", + "version": "1.19.0", "config_sync": { + "enabled": True, "oci": { "sync_repo": "us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest", "policy_dir": "config-connector", @@ -456,8 +458,9 @@ def __init__(__self__, membership=membership.membership_id, membership_location=membership.location, configmanagement={ - "version": "1.6.2", + "version": "1.19.0", "config_sync": { + "enabled": True, "git": { "sync_repo": "https://github.com/hashicorp/terraform", }, @@ -612,8 +615,9 @@ def __init__(__self__, feature=feature.name, membership=membership.membership_id, configmanagement={ - "version": "1.6.2", + "version": "1.19.0", "config_sync": { + "enabled": True, "git": { "sync_repo": "https://github.com/hashicorp/terraform", }, @@ -648,8 +652,9 @@ def __init__(__self__, feature=feature.name, membership=membership.membership_id, configmanagement={ - "version": "1.15.1", + "version": "1.19.0", "config_sync": { + "enabled": True, "oci": { "sync_repo": "us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest", "policy_dir": "config-connector", @@ -734,8 +739,9 @@ def __init__(__self__, membership=membership.membership_id, membership_location=membership.location, configmanagement={ - "version": "1.6.2", + "version": "1.19.0", "config_sync": { + "enabled": True, "git": { "sync_repo": "https://github.com/hashicorp/terraform", }, diff --git a/sdk/python/pulumi_gcp/gkehub/outputs.py b/sdk/python/pulumi_gcp/gkehub/outputs.py index 08a5551484..f89eb56891 100644 --- a/sdk/python/pulumi_gcp/gkehub/outputs.py +++ b/sdk/python/pulumi_gcp/gkehub/outputs.py @@ -1246,11 +1246,19 @@ def __init__(__self__, *, policy_controller: Optional['outputs.FeatureMembershipConfigmanagementPolicyController'] = None, version: Optional[str] = None): """ - :param 'FeatureMembershipConfigmanagementBinauthzArgs' binauthz: Binauthz configuration for the cluster. Structure is documented below. + :param 'FeatureMembershipConfigmanagementBinauthzArgs' binauthz: (Optional, Deprecated) + Binauthz configuration for the cluster. Structure is documented below. + This field will be ignored and should not be set. :param 'FeatureMembershipConfigmanagementConfigSyncArgs' config_sync: Config Sync configuration for the cluster. Structure is documented below. :param 'FeatureMembershipConfigmanagementHierarchyControllerArgs' hierarchy_controller: Hierarchy Controller configuration for the cluster. Structure is documented below. + Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. 
+ Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + to migrate from Hierarchy Controller to HNC. :param str management: Set this field to MANAGEMENT_AUTOMATIC to enable Config Sync auto-upgrades, and set this field to MANAGEMENT_MANUAL or MANAGEMENT_UNSPECIFIED to disable Config Sync auto-upgrades. :param 'FeatureMembershipConfigmanagementPolicyControllerArgs' policy_controller: Policy Controller configuration for the cluster. Structure is documented below. + Configuring Policy Controller through the configmanagement feature is no longer recommended. + Use the policycontroller feature instead. :param str version: Version of ACM installed. """ if binauthz is not None: @@ -1270,7 +1278,9 @@ def __init__(__self__, *, @pulumi.getter def binauthz(self) -> Optional['outputs.FeatureMembershipConfigmanagementBinauthz']: """ + (Optional, Deprecated) Binauthz configuration for the cluster. Structure is documented below. + This field will be ignored and should not be set. """ return pulumi.get(self, "binauthz") @@ -1287,6 +1297,10 @@ def config_sync(self) -> Optional['outputs.FeatureMembershipConfigmanagementConf def hierarchy_controller(self) -> Optional['outputs.FeatureMembershipConfigmanagementHierarchyController']: """ Hierarchy Controller configuration for the cluster. Structure is documented below. + Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. + Use open source Kubernetes [Hierarchical Namespace Controller (HNC)](https://github.com/kubernetes-sigs/hierarchical-namespaces) instead. + Follow the [instructions](https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/migrate-hierarchy-controller) + to migrate from Hierarchy Controller to HNC. """ return pulumi.get(self, "hierarchy_controller") @@ -1303,6 +1317,8 @@ def management(self) -> Optional[str]: def policy_controller(self) -> Optional['outputs.FeatureMembershipConfigmanagementPolicyController']: """ Policy Controller configuration for the cluster. Structure is documented below. + Configuring Policy Controller through the configmanagement feature is no longer recommended. + Use the policycontroller feature instead. """ return pulumi.get(self, "policy_controller") diff --git a/sdk/python/pulumi_gcp/iam/_inputs.py b/sdk/python/pulumi_gcp/iam/_inputs.py index c2ebdd39b3..32fcb3feb5 100644 --- a/sdk/python/pulumi_gcp/iam/_inputs.py +++ b/sdk/python/pulumi_gcp/iam/_inputs.py @@ -55,6 +55,14 @@ 'WorkloadIdentityPoolProviderOidcArgsDict', 'WorkloadIdentityPoolProviderSamlArgs', 'WorkloadIdentityPoolProviderSamlArgsDict', + 'WorkloadIdentityPoolProviderX509Args', + 'WorkloadIdentityPoolProviderX509ArgsDict', + 'WorkloadIdentityPoolProviderX509TrustStoreArgs', + 'WorkloadIdentityPoolProviderX509TrustStoreArgsDict', + 'WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs', + 'WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgsDict', + 'WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs', + 'WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgsDict', ] MYPY = False @@ -1567,6 +1575,8 @@ class WorkloadIdentityPoolProviderSamlArgsDict(TypedDict): idp_metadata_xml: pulumi.Input[str] """ SAML Identity provider configuration metadata xml doc. 
+ + The `x509` block supports: """ elif False: WorkloadIdentityPoolProviderSamlArgsDict: TypeAlias = Mapping[str, Any] @@ -1577,6 +1587,8 @@ def __init__(__self__, *, idp_metadata_xml: pulumi.Input[str]): """ :param pulumi.Input[str] idp_metadata_xml: SAML Identity provider configuration metadata xml doc. + + The `x509` block supports: """ pulumi.set(__self__, "idp_metadata_xml", idp_metadata_xml) @@ -1585,6 +1597,8 @@ def __init__(__self__, *, def idp_metadata_xml(self) -> pulumi.Input[str]: """ SAML Identity provider configuration metadata xml doc. + + The `x509` block supports: """ return pulumi.get(self, "idp_metadata_xml") @@ -1593,3 +1607,185 @@ def idp_metadata_xml(self, value: pulumi.Input[str]): pulumi.set(self, "idp_metadata_xml", value) +if not MYPY: + class WorkloadIdentityPoolProviderX509ArgsDict(TypedDict): + trust_store: pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreArgsDict'] + """ + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + """ +elif False: + WorkloadIdentityPoolProviderX509ArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class WorkloadIdentityPoolProviderX509Args: + def __init__(__self__, *, + trust_store: pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreArgs']): + """ + :param pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreArgs'] trust_store: A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + """ + pulumi.set(__self__, "trust_store", trust_store) + + @property + @pulumi.getter(name="trustStore") + def trust_store(self) -> pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreArgs']: + """ + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + """ + return pulumi.get(self, "trust_store") + + @trust_store.setter + def trust_store(self, value: pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreArgs']): + pulumi.set(self, "trust_store", value) + + +if not MYPY: + class WorkloadIdentityPoolProviderX509TrustStoreArgsDict(TypedDict): + trust_anchors: pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgsDict']]] + """ + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + Structure is documented below. + """ + intermediate_cas: NotRequired[pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgsDict']]]] + """ + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + Structure is documented below. 
+ """ +elif False: + WorkloadIdentityPoolProviderX509TrustStoreArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class WorkloadIdentityPoolProviderX509TrustStoreArgs: + def __init__(__self__, *, + trust_anchors: pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs']]], + intermediate_cas: Optional[pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs']]]] = None): + """ + :param pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs']]] trust_anchors: List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + Structure is documented below. + :param pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs']]] intermediate_cas: Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + Structure is documented below. + """ + pulumi.set(__self__, "trust_anchors", trust_anchors) + if intermediate_cas is not None: + pulumi.set(__self__, "intermediate_cas", intermediate_cas) + + @property + @pulumi.getter(name="trustAnchors") + def trust_anchors(self) -> pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs']]]: + """ + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + Structure is documented below. + """ + return pulumi.get(self, "trust_anchors") + + @trust_anchors.setter + def trust_anchors(self, value: pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs']]]): + pulumi.set(self, "trust_anchors", value) + + @property + @pulumi.getter(name="intermediateCas") + def intermediate_cas(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs']]]]: + """ + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + Structure is documented below. + """ + return pulumi.get(self, "intermediate_cas") + + @intermediate_cas.setter + def intermediate_cas(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs']]]]): + pulumi.set(self, "intermediate_cas", value) + + +if not MYPY: + class WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgsDict(TypedDict): + pem_certificate: NotRequired[pulumi.Input[str]] + """ + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ +elif False: + WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs: + def __init__(__self__, *, + pem_certificate: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] pem_certificate: PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). 
+ """ + if pem_certificate is not None: + pulumi.set(__self__, "pem_certificate", pem_certificate) + + @property + @pulumi.getter(name="pemCertificate") + def pem_certificate(self) -> Optional[pulumi.Input[str]]: + """ + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + return pulumi.get(self, "pem_certificate") + + @pem_certificate.setter + def pem_certificate(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "pem_certificate", value) + + +if not MYPY: + class WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgsDict(TypedDict): + pem_certificate: NotRequired[pulumi.Input[str]] + """ + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ +elif False: + WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs: + def __init__(__self__, *, + pem_certificate: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] pem_certificate: PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + if pem_certificate is not None: + pulumi.set(__self__, "pem_certificate", pem_certificate) + + @property + @pulumi.getter(name="pemCertificate") + def pem_certificate(self) -> Optional[pulumi.Input[str]]: + """ + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + return pulumi.get(self, "pem_certificate") + + @pem_certificate.setter + def pem_certificate(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "pem_certificate", value) + + diff --git a/sdk/python/pulumi_gcp/iam/get_workload_identity_pool_provider.py b/sdk/python/pulumi_gcp/iam/get_workload_identity_pool_provider.py index 047329fea0..52f1fd74fe 100644 --- a/sdk/python/pulumi_gcp/iam/get_workload_identity_pool_provider.py +++ b/sdk/python/pulumi_gcp/iam/get_workload_identity_pool_provider.py @@ -27,7 +27,7 @@ class GetWorkloadIdentityPoolProviderResult: """ A collection of values returned by getWorkloadIdentityPoolProvider. 
""" - def __init__(__self__, attribute_condition=None, attribute_mapping=None, aws=None, description=None, disabled=None, display_name=None, id=None, name=None, oidcs=None, project=None, samls=None, state=None, workload_identity_pool_id=None, workload_identity_pool_provider_id=None): + def __init__(__self__, attribute_condition=None, attribute_mapping=None, aws=None, description=None, disabled=None, display_name=None, id=None, name=None, oidcs=None, project=None, samls=None, state=None, workload_identity_pool_id=None, workload_identity_pool_provider_id=None, x509s=None): if attribute_condition and not isinstance(attribute_condition, str): raise TypeError("Expected argument 'attribute_condition' to be a str") pulumi.set(__self__, "attribute_condition", attribute_condition) @@ -70,6 +70,9 @@ def __init__(__self__, attribute_condition=None, attribute_mapping=None, aws=Non if workload_identity_pool_provider_id and not isinstance(workload_identity_pool_provider_id, str): raise TypeError("Expected argument 'workload_identity_pool_provider_id' to be a str") pulumi.set(__self__, "workload_identity_pool_provider_id", workload_identity_pool_provider_id) + if x509s and not isinstance(x509s, list): + raise TypeError("Expected argument 'x509s' to be a list") + pulumi.set(__self__, "x509s", x509s) @property @pulumi.getter(name="attributeCondition") @@ -144,6 +147,11 @@ def workload_identity_pool_id(self) -> str: def workload_identity_pool_provider_id(self) -> str: return pulumi.get(self, "workload_identity_pool_provider_id") + @property + @pulumi.getter + def x509s(self) -> Sequence['outputs.GetWorkloadIdentityPoolProviderX509Result']: + return pulumi.get(self, "x509s") + class AwaitableGetWorkloadIdentityPoolProviderResult(GetWorkloadIdentityPoolProviderResult): # pylint: disable=using-constant-test @@ -164,7 +172,8 @@ def __await__(self): samls=self.samls, state=self.state, workload_identity_pool_id=self.workload_identity_pool_id, - workload_identity_pool_provider_id=self.workload_identity_pool_provider_id) + workload_identity_pool_provider_id=self.workload_identity_pool_provider_id, + x509s=self.x509s) def get_workload_identity_pool_provider(project: Optional[str] = None, @@ -215,7 +224,8 @@ def get_workload_identity_pool_provider(project: Optional[str] = None, samls=pulumi.get(__ret__, 'samls'), state=pulumi.get(__ret__, 'state'), workload_identity_pool_id=pulumi.get(__ret__, 'workload_identity_pool_id'), - workload_identity_pool_provider_id=pulumi.get(__ret__, 'workload_identity_pool_provider_id')) + workload_identity_pool_provider_id=pulumi.get(__ret__, 'workload_identity_pool_provider_id'), + x509s=pulumi.get(__ret__, 'x509s')) @_utilities.lift_output_func(get_workload_identity_pool_provider) diff --git a/sdk/python/pulumi_gcp/iam/outputs.py b/sdk/python/pulumi_gcp/iam/outputs.py index 8c5eaa8bf7..caf8b61fd5 100644 --- a/sdk/python/pulumi_gcp/iam/outputs.py +++ b/sdk/python/pulumi_gcp/iam/outputs.py @@ -36,10 +36,18 @@ 'WorkloadIdentityPoolProviderAws', 'WorkloadIdentityPoolProviderOidc', 'WorkloadIdentityPoolProviderSaml', + 'WorkloadIdentityPoolProviderX509', + 'WorkloadIdentityPoolProviderX509TrustStore', + 'WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa', + 'WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor', 'GetTestablePermissionsPermissionResult', 'GetWorkloadIdentityPoolProviderAwResult', 'GetWorkloadIdentityPoolProviderOidcResult', 'GetWorkloadIdentityPoolProviderSamlResult', + 'GetWorkloadIdentityPoolProviderX509Result', + 
'GetWorkloadIdentityPoolProviderX509TrustStoreResult', + 'GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult', + 'GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult', ] @pulumi.output_type @@ -1238,6 +1246,8 @@ def __init__(__self__, *, idp_metadata_xml: str): """ :param str idp_metadata_xml: SAML Identity provider configuration metadata xml doc. + + The `x509` block supports: """ pulumi.set(__self__, "idp_metadata_xml", idp_metadata_xml) @@ -1246,10 +1256,192 @@ def __init__(__self__, *, def idp_metadata_xml(self) -> str: """ SAML Identity provider configuration metadata xml doc. + + The `x509` block supports: """ return pulumi.get(self, "idp_metadata_xml") +@pulumi.output_type +class WorkloadIdentityPoolProviderX509(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "trustStore": + suggest = "trust_store" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in WorkloadIdentityPoolProviderX509. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + WorkloadIdentityPoolProviderX509.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + WorkloadIdentityPoolProviderX509.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + trust_store: 'outputs.WorkloadIdentityPoolProviderX509TrustStore'): + """ + :param 'WorkloadIdentityPoolProviderX509TrustStoreArgs' trust_store: A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + """ + pulumi.set(__self__, "trust_store", trust_store) + + @property + @pulumi.getter(name="trustStore") + def trust_store(self) -> 'outputs.WorkloadIdentityPoolProviderX509TrustStore': + """ + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + """ + return pulumi.get(self, "trust_store") + + +@pulumi.output_type +class WorkloadIdentityPoolProviderX509TrustStore(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "trustAnchors": + suggest = "trust_anchors" + elif key == "intermediateCas": + suggest = "intermediate_cas" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in WorkloadIdentityPoolProviderX509TrustStore. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + WorkloadIdentityPoolProviderX509TrustStore.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + WorkloadIdentityPoolProviderX509TrustStore.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + trust_anchors: Sequence['outputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor'], + intermediate_cas: Optional[Sequence['outputs.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa']] = None): + """ + :param Sequence['WorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs'] trust_anchors: List of Trust Anchors to be used while performing validation + against a given TrustStore. 
The incoming end entity's certificate + must be chained up to one of the trust anchors here. + Structure is documented below. + :param Sequence['WorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs'] intermediate_cas: Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + Structure is documented below. + """ + pulumi.set(__self__, "trust_anchors", trust_anchors) + if intermediate_cas is not None: + pulumi.set(__self__, "intermediate_cas", intermediate_cas) + + @property + @pulumi.getter(name="trustAnchors") + def trust_anchors(self) -> Sequence['outputs.WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor']: + """ + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + Structure is documented below. + """ + return pulumi.get(self, "trust_anchors") + + @property + @pulumi.getter(name="intermediateCas") + def intermediate_cas(self) -> Optional[Sequence['outputs.WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa']]: + """ + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + Structure is documented below. + """ + return pulumi.get(self, "intermediate_cas") + + +@pulumi.output_type +class WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "pemCertificate": + suggest = "pem_certificate" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + WorkloadIdentityPoolProviderX509TrustStoreIntermediateCa.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + pem_certificate: Optional[str] = None): + """ + :param str pem_certificate: PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + if pem_certificate is not None: + pulumi.set(__self__, "pem_certificate", pem_certificate) + + @property + @pulumi.getter(name="pemCertificate") + def pem_certificate(self) -> Optional[str]: + """ + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + return pulumi.get(self, "pem_certificate") + + +@pulumi.output_type +class WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "pemCertificate": + suggest = "pem_certificate" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor. 
Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + WorkloadIdentityPoolProviderX509TrustStoreTrustAnchor.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + pem_certificate: Optional[str] = None): + """ + :param str pem_certificate: PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + if pem_certificate is not None: + pulumi.set(__self__, "pem_certificate", pem_certificate) + + @property + @pulumi.getter(name="pemCertificate") + def pem_certificate(self) -> Optional[str]: + """ + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + return pulumi.get(self, "pem_certificate") + + @pulumi.output_type class GetTestablePermissionsPermissionResult(dict): def __init__(__self__, *, @@ -1454,3 +1646,106 @@ def idp_metadata_xml(self) -> str: return pulumi.get(self, "idp_metadata_xml") +@pulumi.output_type +class GetWorkloadIdentityPoolProviderX509Result(dict): + def __init__(__self__, *, + trust_stores: Sequence['outputs.GetWorkloadIdentityPoolProviderX509TrustStoreResult']): + """ + :param Sequence['GetWorkloadIdentityPoolProviderX509TrustStoreArgs'] trust_stores: A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + """ + pulumi.set(__self__, "trust_stores", trust_stores) + + @property + @pulumi.getter(name="trustStores") + def trust_stores(self) -> Sequence['outputs.GetWorkloadIdentityPoolProviderX509TrustStoreResult']: + """ + A Trust store, use this trust store as a wrapper to config the trust + anchor and optional intermediate cas to help build the trust chain for + the incoming end entity certificate. Follow the x509 guidelines to + define those PEM encoded certs. Only 1 trust store is currently + supported. + """ + return pulumi.get(self, "trust_stores") + + +@pulumi.output_type +class GetWorkloadIdentityPoolProviderX509TrustStoreResult(dict): + def __init__(__self__, *, + intermediate_cas: Sequence['outputs.GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult'], + trust_anchors: Sequence['outputs.GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult']): + """ + :param Sequence['GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaArgs'] intermediate_cas: Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + :param Sequence['GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorArgs'] trust_anchors: List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. 
+ """ + pulumi.set(__self__, "intermediate_cas", intermediate_cas) + pulumi.set(__self__, "trust_anchors", trust_anchors) + + @property + @pulumi.getter(name="intermediateCas") + def intermediate_cas(self) -> Sequence['outputs.GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult']: + """ + Set of intermediate CA certificates used for building the trust chain to + trust anchor. + IMPORTANT: Intermediate CAs are only supported when configuring x509 federation. + """ + return pulumi.get(self, "intermediate_cas") + + @property + @pulumi.getter(name="trustAnchors") + def trust_anchors(self) -> Sequence['outputs.GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult']: + """ + List of Trust Anchors to be used while performing validation + against a given TrustStore. The incoming end entity's certificate + must be chained up to one of the trust anchors here. + """ + return pulumi.get(self, "trust_anchors") + + +@pulumi.output_type +class GetWorkloadIdentityPoolProviderX509TrustStoreIntermediateCaResult(dict): + def __init__(__self__, *, + pem_certificate: str): + """ + :param str pem_certificate: PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + pulumi.set(__self__, "pem_certificate", pem_certificate) + + @property + @pulumi.getter(name="pemCertificate") + def pem_certificate(self) -> str: + """ + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + return pulumi.get(self, "pem_certificate") + + +@pulumi.output_type +class GetWorkloadIdentityPoolProviderX509TrustStoreTrustAnchorResult(dict): + def __init__(__self__, *, + pem_certificate: str): + """ + :param str pem_certificate: PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + pulumi.set(__self__, "pem_certificate", pem_certificate) + + @property + @pulumi.getter(name="pemCertificate") + def pem_certificate(self) -> str: + """ + PEM certificate of the PKI used for validation. Must only contain one + ca certificate(either root or intermediate cert). + """ + return pulumi.get(self, "pem_certificate") + + diff --git a/sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py b/sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py index dd145d0202..f3054b7ad5 100644 --- a/sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py +++ b/sdk/python/pulumi_gcp/iam/workload_identity_pool_provider.py @@ -31,7 +31,8 @@ def __init__(__self__, *, display_name: Optional[pulumi.Input[str]] = None, oidc: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']] = None, project: Optional[pulumi.Input[str]] = None, - saml: Optional[pulumi.Input['WorkloadIdentityPoolProviderSamlArgs']] = None): + saml: Optional[pulumi.Input['WorkloadIdentityPoolProviderSamlArgs']] = None, + x509: Optional[pulumi.Input['WorkloadIdentityPoolProviderX509Args']] = None): """ The set of arguments for constructing a WorkloadIdentityPoolProvider resource. :param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This @@ -111,6 +112,9 @@ def __init__(__self__, *, If it is not provided, the provider project is used. :param pulumi.Input['WorkloadIdentityPoolProviderSamlArgs'] saml: An SAML 2.0 identity provider. Not compatible with the property oidc or aws. Structure is documented below. 
+ :param pulumi.Input['WorkloadIdentityPoolProviderX509Args'] x509: An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. """ pulumi.set(__self__, "workload_identity_pool_id", workload_identity_pool_id) pulumi.set(__self__, "workload_identity_pool_provider_id", workload_identity_pool_provider_id) @@ -132,6 +136,8 @@ def __init__(__self__, *, pulumi.set(__self__, "project", project) if saml is not None: pulumi.set(__self__, "saml", saml) + if x509 is not None: + pulumi.set(__self__, "x509", x509) @property @pulumi.getter(name="workloadIdentityPoolId") @@ -331,6 +337,20 @@ def saml(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderSamlArgs']] def saml(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderSamlArgs']]): pulumi.set(self, "saml", value) + @property + @pulumi.getter + def x509(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderX509Args']]: + """ + An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. + """ + return pulumi.get(self, "x509") + + @x509.setter + def x509(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderX509Args']]): + pulumi.set(self, "x509", value) + @pulumi.input_type class _WorkloadIdentityPoolProviderState: @@ -347,7 +367,8 @@ def __init__(__self__, *, saml: Optional[pulumi.Input['WorkloadIdentityPoolProviderSamlArgs']] = None, state: Optional[pulumi.Input[str]] = None, workload_identity_pool_id: Optional[pulumi.Input[str]] = None, - workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None): + workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None, + x509: Optional[pulumi.Input['WorkloadIdentityPoolProviderX509Args']] = None): """ Input properties used for looking up and filtering WorkloadIdentityPoolProvider resources. :param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in @@ -436,6 +457,9 @@ def __init__(__self__, *, - - - + :param pulumi.Input['WorkloadIdentityPoolProviderX509Args'] x509: An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. """ if attribute_condition is not None: pulumi.set(__self__, "attribute_condition", attribute_condition) @@ -463,6 +487,8 @@ def __init__(__self__, *, pulumi.set(__self__, "workload_identity_pool_id", workload_identity_pool_id) if workload_identity_pool_provider_id is not None: pulumi.set(__self__, "workload_identity_pool_provider_id", workload_identity_pool_provider_id) + if x509 is not None: + pulumi.set(__self__, "x509", x509) @property @pulumi.getter(name="attributeCondition") @@ -693,6 +719,20 @@ def workload_identity_pool_provider_id(self) -> Optional[pulumi.Input[str]]: def workload_identity_pool_provider_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "workload_identity_pool_provider_id", value) + @property + @pulumi.getter + def x509(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderX509Args']]: + """ + An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. 
+ """ + return pulumi.get(self, "x509") + + @x509.setter + def x509(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderX509Args']]): + pulumi.set(self, "x509", value) + class WorkloadIdentityPoolProvider(pulumi.CustomResource): @overload @@ -710,6 +750,7 @@ def __init__(__self__, saml: Optional[pulumi.Input[Union['WorkloadIdentityPoolProviderSamlArgs', 'WorkloadIdentityPoolProviderSamlArgsDict']]] = None, workload_identity_pool_id: Optional[pulumi.Input[str]] = None, workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None, + x509: Optional[pulumi.Input[Union['WorkloadIdentityPoolProviderX509Args', 'WorkloadIdentityPoolProviderX509ArgsDict']]] = None, __props__=None): """ A configuration for an external identity provider. @@ -882,6 +923,56 @@ def __init__(__self__, "jwks_json": "{\\"keys\\":[{\\"kty\\":\\"RSA\\",\\"alg\\":\\"RS256\\",\\"kid\\":\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\",\\"use\\":\\"sig\\",\\"e\\":\\"AQAB\\",\\"n\\":\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\"}]}", }) ``` + ### Iam Workload Identity Pool Provider X509 Basic + + ```python + import pulumi + import pulumi_gcp as gcp + import pulumi_std as std + + pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool") + example = gcp.iam.WorkloadIdentityPoolProvider("example", + workload_identity_pool_id=pool.workload_identity_pool_id, + workload_identity_pool_provider_id="example-prvdr", + attribute_mapping={ + "google.subject": "assertion.subject.dn.cn", + }, + x509={ + "trust_store": { + "trust_anchors": [{ + "pem_certificate": std.file(input="test-fixtures/trust_anchor.pem").result, + }], + }, + }) + ``` + ### Iam Workload Identity Pool Provider X509 Full + + ```python + import pulumi + import pulumi_gcp as gcp + import pulumi_std as std + + pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool") + example = gcp.iam.WorkloadIdentityPoolProvider("example", + workload_identity_pool_id=pool.workload_identity_pool_id, + workload_identity_pool_provider_id="example-prvdr", + display_name="Name of provider", + description="X.509 identity pool provider for automated test", + disabled=True, + attribute_mapping={ + "google.subject": "assertion.subject.dn.cn", + }, + x509={ + "trust_store": { + "trust_anchors": [{ + "pem_certificate": std.file(input="test-fixtures/trust_anchor.pem").result, + }], + "intermediate_cas": [{ + "pem_certificate": std.file(input="test-fixtures/intermediate_ca.pem").result, + }], + }, + }) + ``` ## Import @@ -986,6 +1077,9 @@ def __init__(__self__, - - - + :param pulumi.Input[Union['WorkloadIdentityPoolProviderX509Args', 'WorkloadIdentityPoolProviderX509ArgsDict']] x509: An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. """ ... 
@overload @@ -1164,6 +1258,56 @@ def __init__(__self__, "jwks_json": "{\\"keys\\":[{\\"kty\\":\\"RSA\\",\\"alg\\":\\"RS256\\",\\"kid\\":\\"sif0AR-F6MuvksAyAOv-Pds08Bcf2eUMlxE30NofddA\\",\\"use\\":\\"sig\\",\\"e\\":\\"AQAB\\",\\"n\\":\\"ylH1Chl1tpfti3lh51E1g5dPogzXDaQseqjsefGLknaNl5W6Wd4frBhHyE2t41Q5zgz_Ll0-NvWm0FlaG6brhrN9QZu6sJP1bM8WPfJVPgXOanxi7d7TXCkeNubGeiLTf5R3UXtS9Lm_guemU7MxDjDTelxnlgGCihOVTcL526suNJUdfXtpwUsvdU6_ZnAp9IpsuYjCtwPm9hPumlcZGMbxstdh07O4y4O90cVQClJOKSGQjAUCKJWXIQ0cqffGS_HuS_725CPzQ85SzYZzaNpgfhAER7kx_9P16ARM3BJz0PI5fe2hECE61J4GYU_BY43sxDfs7HyJpEXKLU9eWw\\"}]}", }) ``` + ### Iam Workload Identity Pool Provider X509 Basic + + ```python + import pulumi + import pulumi_gcp as gcp + import pulumi_std as std + + pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool") + example = gcp.iam.WorkloadIdentityPoolProvider("example", + workload_identity_pool_id=pool.workload_identity_pool_id, + workload_identity_pool_provider_id="example-prvdr", + attribute_mapping={ + "google.subject": "assertion.subject.dn.cn", + }, + x509={ + "trust_store": { + "trust_anchors": [{ + "pem_certificate": std.file(input="test-fixtures/trust_anchor.pem").result, + }], + }, + }) + ``` + ### Iam Workload Identity Pool Provider X509 Full + + ```python + import pulumi + import pulumi_gcp as gcp + import pulumi_std as std + + pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool") + example = gcp.iam.WorkloadIdentityPoolProvider("example", + workload_identity_pool_id=pool.workload_identity_pool_id, + workload_identity_pool_provider_id="example-prvdr", + display_name="Name of provider", + description="X.509 identity pool provider for automated test", + disabled=True, + attribute_mapping={ + "google.subject": "assertion.subject.dn.cn", + }, + x509={ + "trust_store": { + "trust_anchors": [{ + "pem_certificate": std.file(input="test-fixtures/trust_anchor.pem").result, + }], + "intermediate_cas": [{ + "pem_certificate": std.file(input="test-fixtures/intermediate_ca.pem").result, + }], + }, + }) + ``` ## Import @@ -1215,6 +1359,7 @@ def _internal_init(__self__, saml: Optional[pulumi.Input[Union['WorkloadIdentityPoolProviderSamlArgs', 'WorkloadIdentityPoolProviderSamlArgsDict']]] = None, workload_identity_pool_id: Optional[pulumi.Input[str]] = None, workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None, + x509: Optional[pulumi.Input[Union['WorkloadIdentityPoolProviderX509Args', 'WorkloadIdentityPoolProviderX509ArgsDict']]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -1239,6 +1384,7 @@ def _internal_init(__self__, if workload_identity_pool_provider_id is None and not opts.urn: raise TypeError("Missing required property 'workload_identity_pool_provider_id'") __props__.__dict__["workload_identity_pool_provider_id"] = workload_identity_pool_provider_id + __props__.__dict__["x509"] = x509 __props__.__dict__["name"] = None __props__.__dict__["state"] = None super(WorkloadIdentityPoolProvider, __self__).__init__( @@ -1263,7 +1409,8 @@ def get(resource_name: str, saml: Optional[pulumi.Input[Union['WorkloadIdentityPoolProviderSamlArgs', 'WorkloadIdentityPoolProviderSamlArgsDict']]] = None, state: Optional[pulumi.Input[str]] = None, workload_identity_pool_id: Optional[pulumi.Input[str]] = None, - workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None) -> 'WorkloadIdentityPoolProvider': + 
workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None, + x509: Optional[pulumi.Input[Union['WorkloadIdentityPoolProviderX509Args', 'WorkloadIdentityPoolProviderX509ArgsDict']]] = None) -> 'WorkloadIdentityPoolProvider': """ Get an existing WorkloadIdentityPoolProvider resource's state with the given name, id, and optional extra properties used to qualify the lookup. @@ -1357,6 +1504,9 @@ def get(resource_name: str, - - - + :param pulumi.Input[Union['WorkloadIdentityPoolProviderX509Args', 'WorkloadIdentityPoolProviderX509ArgsDict']] x509: An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -1375,6 +1525,7 @@ def get(resource_name: str, __props__.__dict__["state"] = state __props__.__dict__["workload_identity_pool_id"] = workload_identity_pool_id __props__.__dict__["workload_identity_pool_provider_id"] = workload_identity_pool_provider_id + __props__.__dict__["x509"] = x509 return WorkloadIdentityPoolProvider(resource_name, opts=opts, __props__=__props__) @property @@ -1554,3 +1705,13 @@ def workload_identity_pool_provider_id(self) -> pulumi.Output[str]: """ return pulumi.get(self, "workload_identity_pool_provider_id") + @property + @pulumi.getter + def x509(self) -> pulumi.Output[Optional['outputs.WorkloadIdentityPoolProviderX509']]: + """ + An X.509-type identity provider represents a CA. It is trusted to assert a + client identity if the client has a certificate that chains up to this CA. + Structure is documented below. + """ + return pulumi.get(self, "x509") + diff --git a/sdk/python/pulumi_gcp/kms/__init__.py b/sdk/python/pulumi_gcp/kms/__init__.py index 53f8cbe911..469e349191 100644 --- a/sdk/python/pulumi_gcp/kms/__init__.py +++ b/sdk/python/pulumi_gcp/kms/__init__.py @@ -16,6 +16,8 @@ from .ekm_connection_iam_member import * from .ekm_connection_iam_policy import * from .get_crypto_key_iam_policy import * +from .get_crypto_key_latest_version import * +from .get_crypto_key_versions import * from .get_crypto_keys import * from .get_ekm_connection_iam_policy import * from .get_key_ring_iam_policy import * diff --git a/sdk/python/pulumi_gcp/kms/autokey_config.py b/sdk/python/pulumi_gcp/kms/autokey_config.py index 794356775a..2a6dfdc6a2 100644 --- a/sdk/python/pulumi_gcp/kms/autokey_config.py +++ b/sdk/python/pulumi_gcp/kms/autokey_config.py @@ -174,9 +174,13 @@ def __init__(__self__, wait_srv_acc_permissions = time.index.Sleep("wait_srv_acc_permissions", create_duration=10s, opts = pulumi.ResourceOptions(depends_on=[autokey_project_admin])) example_autokeyconfig = gcp.kms.AutokeyConfig("example-autokeyconfig", - folder=autokms_folder.folder_id, + folder=autokms_folder.id, key_project=key_project.project_id.apply(lambda project_id: f"projects/{project_id}"), opts = pulumi.ResourceOptions(depends_on=[wait_srv_acc_permissions])) + # Wait delay after setting AutokeyConfig, to prevent diffs on reapply, + # because setting the config takes a little to fully propagate. 
+ wait_autokey_propagation = time.index.Sleep("wait_autokey_propagation", create_duration=30s, + opts = pulumi.ResourceOptions(depends_on=[example_autokeyconfig])) ``` ## Import @@ -264,9 +268,13 @@ def __init__(__self__, wait_srv_acc_permissions = time.index.Sleep("wait_srv_acc_permissions", create_duration=10s, opts = pulumi.ResourceOptions(depends_on=[autokey_project_admin])) example_autokeyconfig = gcp.kms.AutokeyConfig("example-autokeyconfig", - folder=autokms_folder.folder_id, + folder=autokms_folder.id, key_project=key_project.project_id.apply(lambda project_id: f"projects/{project_id}"), opts = pulumi.ResourceOptions(depends_on=[wait_srv_acc_permissions])) + # Wait delay after setting AutokeyConfig, to prevent diffs on reapply, + # because setting the config takes a little to fully propagate. + wait_autokey_propagation = time.index.Sleep("wait_autokey_propagation", create_duration=30s, + opts = pulumi.ResourceOptions(depends_on=[example_autokeyconfig])) ``` ## Import diff --git a/sdk/python/pulumi_gcp/kms/get_crypto_key_latest_version.py b/sdk/python/pulumi_gcp/kms/get_crypto_key_latest_version.py new file mode 100644 index 0000000000..53f42c4b20 --- /dev/null +++ b/sdk/python/pulumi_gcp/kms/get_crypto_key_latest_version.py @@ -0,0 +1,222 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import sys +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +if sys.version_info >= (3, 11): + from typing import NotRequired, TypedDict, TypeAlias +else: + from typing_extensions import NotRequired, TypedDict, TypeAlias +from .. import _utilities +from . import outputs + +__all__ = [ + 'GetCryptoKeyLatestVersionResult', + 'AwaitableGetCryptoKeyLatestVersionResult', + 'get_crypto_key_latest_version', + 'get_crypto_key_latest_version_output', +] + +@pulumi.output_type +class GetCryptoKeyLatestVersionResult: + """ + A collection of values returned by getCryptoKeyLatestVersion. 
+ """ + def __init__(__self__, algorithm=None, crypto_key=None, filter=None, id=None, name=None, protection_level=None, public_keys=None, state=None, version=None): + if algorithm and not isinstance(algorithm, str): + raise TypeError("Expected argument 'algorithm' to be a str") + pulumi.set(__self__, "algorithm", algorithm) + if crypto_key and not isinstance(crypto_key, str): + raise TypeError("Expected argument 'crypto_key' to be a str") + pulumi.set(__self__, "crypto_key", crypto_key) + if filter and not isinstance(filter, str): + raise TypeError("Expected argument 'filter' to be a str") + pulumi.set(__self__, "filter", filter) + if id and not isinstance(id, str): + raise TypeError("Expected argument 'id' to be a str") + pulumi.set(__self__, "id", id) + if name and not isinstance(name, str): + raise TypeError("Expected argument 'name' to be a str") + pulumi.set(__self__, "name", name) + if protection_level and not isinstance(protection_level, str): + raise TypeError("Expected argument 'protection_level' to be a str") + pulumi.set(__self__, "protection_level", protection_level) + if public_keys and not isinstance(public_keys, list): + raise TypeError("Expected argument 'public_keys' to be a list") + pulumi.set(__self__, "public_keys", public_keys) + if state and not isinstance(state, str): + raise TypeError("Expected argument 'state' to be a str") + pulumi.set(__self__, "state", state) + if version and not isinstance(version, int): + raise TypeError("Expected argument 'version' to be a int") + pulumi.set(__self__, "version", version) + + @property + @pulumi.getter + def algorithm(self) -> str: + """ + The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + """ + return pulumi.get(self, "algorithm") + + @property + @pulumi.getter(name="cryptoKey") + def crypto_key(self) -> str: + return pulumi.get(self, "crypto_key") + + @property + @pulumi.getter + def filter(self) -> Optional[str]: + return pulumi.get(self, "filter") + + @property + @pulumi.getter + def id(self) -> str: + """ + The provider-assigned unique ID for this managed resource. + """ + return pulumi.get(self, "id") + + @property + @pulumi.getter + def name(self) -> str: + return pulumi.get(self, "name") + + @property + @pulumi.getter(name="protectionLevel") + def protection_level(self) -> str: + """ + The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protection_level reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs. + """ + return pulumi.get(self, "protection_level") + + @property + @pulumi.getter(name="publicKeys") + def public_keys(self) -> Sequence['outputs.GetCryptoKeyLatestVersionPublicKeyResult']: + """ + If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below. + """ + return pulumi.get(self, "public_keys") + + @property + @pulumi.getter + def state(self) -> str: + """ + The current state of the latest CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs. 
+ """ + return pulumi.get(self, "state") + + @property + @pulumi.getter + def version(self) -> int: + return pulumi.get(self, "version") + + +class AwaitableGetCryptoKeyLatestVersionResult(GetCryptoKeyLatestVersionResult): + # pylint: disable=using-constant-test + def __await__(self): + if False: + yield self + return GetCryptoKeyLatestVersionResult( + algorithm=self.algorithm, + crypto_key=self.crypto_key, + filter=self.filter, + id=self.id, + name=self.name, + protection_level=self.protection_level, + public_keys=self.public_keys, + state=self.state, + version=self.version) + + +def get_crypto_key_latest_version(crypto_key: Optional[str] = None, + filter: Optional[str] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCryptoKeyLatestVersionResult: + """ + Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. For more information see + [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + and + [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + + ## Example Usage + + ```python + import pulumi + import pulumi_gcp as gcp + + my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring", + location="us-central1") + my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key", + key_ring=my_key_ring.id) + my_crypto_key_latest_version = gcp.kms.get_crypto_key_latest_version(crypto_key=my_key["id"]) + ``` + + + :param str crypto_key: The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + `kms.CryptoKey` resource/datasource. + :param str filter: The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + + Example filter values if filtering on state. + + * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + + [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + """ + __args__ = dict() + __args__['cryptoKey'] = crypto_key + __args__['filter'] = filter + opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) + __ret__ = pulumi.runtime.invoke('gcp:kms/getCryptoKeyLatestVersion:getCryptoKeyLatestVersion', __args__, opts=opts, typ=GetCryptoKeyLatestVersionResult).value + + return AwaitableGetCryptoKeyLatestVersionResult( + algorithm=pulumi.get(__ret__, 'algorithm'), + crypto_key=pulumi.get(__ret__, 'crypto_key'), + filter=pulumi.get(__ret__, 'filter'), + id=pulumi.get(__ret__, 'id'), + name=pulumi.get(__ret__, 'name'), + protection_level=pulumi.get(__ret__, 'protection_level'), + public_keys=pulumi.get(__ret__, 'public_keys'), + state=pulumi.get(__ret__, 'state'), + version=pulumi.get(__ret__, 'version')) + + +@_utilities.lift_output_func(get_crypto_key_latest_version) +def get_crypto_key_latest_version_output(crypto_key: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[Optional[str]]] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCryptoKeyLatestVersionResult]: + """ + Provides access to the latest Google Cloud Platform KMS CryptoKeyVersion in a CryptoKey. 
For more information see + [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + and + [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + + ## Example Usage + + ```python + import pulumi + import pulumi_gcp as gcp + + my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring", + location="us-central1") + my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key", + key_ring=my_key_ring.id) + my_crypto_key_latest_version = gcp.kms.get_crypto_key_latest_version(crypto_key=my_key["id"]) + ``` + + + :param str crypto_key: The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + `kms.CryptoKey` resource/datasource. + :param str filter: The filter argument is used to add a filter query parameter that limits which type of cryptoKeyVersion is retrieved as the latest by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + + Example filter values if filtering on state. + + * `"state:ENABLED"` will retrieve the latest cryptoKeyVersion that has the state "ENABLED". + + [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + """ + ... diff --git a/sdk/python/pulumi_gcp/kms/get_crypto_key_versions.py b/sdk/python/pulumi_gcp/kms/get_crypto_key_versions.py new file mode 100644 index 0000000000..0cc90014ba --- /dev/null +++ b/sdk/python/pulumi_gcp/kms/get_crypto_key_versions.py @@ -0,0 +1,175 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import sys +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +if sys.version_info >= (3, 11): + from typing import NotRequired, TypedDict, TypeAlias +else: + from typing_extensions import NotRequired, TypedDict, TypeAlias +from .. import _utilities +from . import outputs + +__all__ = [ + 'GetCryptoKeyVersionsResult', + 'AwaitableGetCryptoKeyVersionsResult', + 'get_crypto_key_versions', + 'get_crypto_key_versions_output', +] + +@pulumi.output_type +class GetCryptoKeyVersionsResult: + """ + A collection of values returned by getCryptoKeyVersions. 
+ """ + def __init__(__self__, crypto_key=None, filter=None, id=None, public_keys=None, versions=None): + if crypto_key and not isinstance(crypto_key, str): + raise TypeError("Expected argument 'crypto_key' to be a str") + pulumi.set(__self__, "crypto_key", crypto_key) + if filter and not isinstance(filter, str): + raise TypeError("Expected argument 'filter' to be a str") + pulumi.set(__self__, "filter", filter) + if id and not isinstance(id, str): + raise TypeError("Expected argument 'id' to be a str") + pulumi.set(__self__, "id", id) + if public_keys and not isinstance(public_keys, list): + raise TypeError("Expected argument 'public_keys' to be a list") + pulumi.set(__self__, "public_keys", public_keys) + if versions and not isinstance(versions, list): + raise TypeError("Expected argument 'versions' to be a list") + pulumi.set(__self__, "versions", versions) + + @property + @pulumi.getter(name="cryptoKey") + def crypto_key(self) -> str: + return pulumi.get(self, "crypto_key") + + @property + @pulumi.getter + def filter(self) -> Optional[str]: + return pulumi.get(self, "filter") + + @property + @pulumi.getter + def id(self) -> str: + """ + The provider-assigned unique ID for this managed resource. + """ + return pulumi.get(self, "id") + + @property + @pulumi.getter(name="publicKeys") + def public_keys(self) -> Sequence['outputs.GetCryptoKeyVersionsPublicKeyResult']: + return pulumi.get(self, "public_keys") + + @property + @pulumi.getter + def versions(self) -> Sequence['outputs.GetCryptoKeyVersionsVersionResult']: + """ + A list of all the retrieved crypto key versions from the provided crypto key. This list is influenced by the provided filter argument. + """ + return pulumi.get(self, "versions") + + +class AwaitableGetCryptoKeyVersionsResult(GetCryptoKeyVersionsResult): + # pylint: disable=using-constant-test + def __await__(self): + if False: + yield self + return GetCryptoKeyVersionsResult( + crypto_key=self.crypto_key, + filter=self.filter, + id=self.id, + public_keys=self.public_keys, + versions=self.versions) + + +def get_crypto_key_versions(crypto_key: Optional[str] = None, + filter: Optional[str] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCryptoKeyVersionsResult: + """ + Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see + [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + and + [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + + ## Example Usage + + ```python + import pulumi + import pulumi_gcp as gcp + + my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring", + location="us-central1") + my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key", + key_ring=my_key_ring.id) + my_crypto_key_versions = gcp.kms.get_crypto_key_versions(crypto_key=my_key["id"]) + ``` + + + :param str crypto_key: The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + `kms.CryptoKey` resource/datasource. + :param str filter: The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + + Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. 
+ + * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + + [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + """ + __args__ = dict() + __args__['cryptoKey'] = crypto_key + __args__['filter'] = filter + opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) + __ret__ = pulumi.runtime.invoke('gcp:kms/getCryptoKeyVersions:getCryptoKeyVersions', __args__, opts=opts, typ=GetCryptoKeyVersionsResult).value + + return AwaitableGetCryptoKeyVersionsResult( + crypto_key=pulumi.get(__ret__, 'crypto_key'), + filter=pulumi.get(__ret__, 'filter'), + id=pulumi.get(__ret__, 'id'), + public_keys=pulumi.get(__ret__, 'public_keys'), + versions=pulumi.get(__ret__, 'versions')) + + +@_utilities.lift_output_func(get_crypto_key_versions) +def get_crypto_key_versions_output(crypto_key: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[Optional[str]]] = None, + opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCryptoKeyVersionsResult]: + """ + Provides access to Google Cloud Platform KMS CryptoKeyVersions. For more information see + [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) + and + [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). + + ## Example Usage + + ```python + import pulumi + import pulumi_gcp as gcp + + my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring", + location="us-central1") + my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key", + key_ring=my_key_ring.id) + my_crypto_key_versions = gcp.kms.get_crypto_key_versions(crypto_key=my_key["id"]) + ``` + + + :param str crypto_key: The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + `kms.CryptoKey` resource/datasource. + :param str filter: The filter argument is used to add a filter query parameter that limits which versions are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + + Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}/cryptoKeyVersions. + + * `"name:my-key-"` will retrieve cryptoKeyVersions that contain "my-key-" anywhere in their name. + * `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1/cryptoKeyVersions/my-version-1"` will only retrieve a key with that exact name. + + [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + """ + ... 
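A combined usage sketch for the two new KMS data sources, assuming an existing key ring `my-key-ring` and crypto key `my-crypto-key` (placeholders), and chaining the documented `get_kms_crypto_key` lookup instead of the undefined `my_key` reference in the generated examples:

```python
import pulumi
import pulumi_gcp as gcp

# Resolve an existing key ring and key; names are placeholders.
my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring", location="us-central1")
my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key",
                                           key_ring=my_key_ring.id)

# Latest ENABLED version only, using the filter syntax described above.
latest = gcp.kms.get_crypto_key_latest_version(
    crypto_key=my_crypto_key.id,
    filter="state:ENABLED")

# All versions of the key, unfiltered.
all_versions = gcp.kms.get_crypto_key_versions(crypto_key=my_crypto_key.id)

pulumi.export("latest_version_algorithm", latest.algorithm)
pulumi.export("version_names", [v.name for v in all_versions.versions])
```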
diff --git a/sdk/python/pulumi_gcp/kms/outputs.py b/sdk/python/pulumi_gcp/kms/outputs.py index 6243961369..39d092e772 100644 --- a/sdk/python/pulumi_gcp/kms/outputs.py +++ b/sdk/python/pulumi_gcp/kms/outputs.py @@ -33,6 +33,10 @@ 'KeyRingIAMMemberCondition', 'KeyRingImportJobAttestation', 'KeyRingImportJobPublicKey', + 'GetCryptoKeyLatestVersionPublicKeyResult', + 'GetCryptoKeyVersionsPublicKeyResult', + 'GetCryptoKeyVersionsVersionResult', + 'GetCryptoKeyVersionsVersionPublicKeyResult', 'GetCryptoKeysKeyResult', 'GetCryptoKeysKeyKeyAccessJustificationsPolicyResult', 'GetCryptoKeysKeyPrimaryResult', @@ -983,6 +987,166 @@ def pem(self) -> Optional[str]: return pulumi.get(self, "pem") +@pulumi.output_type +class GetCryptoKeyLatestVersionPublicKeyResult(dict): + def __init__(__self__, *, + algorithm: str, + pem: str): + """ + :param str algorithm: The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + :param str pem: The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + """ + pulumi.set(__self__, "algorithm", algorithm) + pulumi.set(__self__, "pem", pem) + + @property + @pulumi.getter + def algorithm(self) -> str: + """ + The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + """ + return pulumi.get(self, "algorithm") + + @property + @pulumi.getter + def pem(self) -> str: + """ + The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + """ + return pulumi.get(self, "pem") + + +@pulumi.output_type +class GetCryptoKeyVersionsPublicKeyResult(dict): + def __init__(__self__, *, + algorithm: str, + pem: str): + """ + :param str algorithm: The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + :param str pem: The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + """ + pulumi.set(__self__, "algorithm", algorithm) + pulumi.set(__self__, "pem", pem) + + @property + @pulumi.getter + def algorithm(self) -> str: + """ + The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + """ + return pulumi.get(self, "algorithm") + + @property + @pulumi.getter + def pem(self) -> str: + """ + The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + """ + return pulumi.get(self, "pem") + + +@pulumi.output_type +class GetCryptoKeyVersionsVersionResult(dict): + def __init__(__self__, *, + algorithm: str, + crypto_key: str, + id: str, + name: str, + protection_level: str, + public_keys: Sequence['outputs.GetCryptoKeyVersionsVersionPublicKeyResult'], + state: str, + version: int): + """ + :param str algorithm: The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + :param str crypto_key: The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + `kms.CryptoKey` resource/datasource. 
+ """ + pulumi.set(__self__, "algorithm", algorithm) + pulumi.set(__self__, "crypto_key", crypto_key) + pulumi.set(__self__, "id", id) + pulumi.set(__self__, "name", name) + pulumi.set(__self__, "protection_level", protection_level) + pulumi.set(__self__, "public_keys", public_keys) + pulumi.set(__self__, "state", state) + pulumi.set(__self__, "version", version) + + @property + @pulumi.getter + def algorithm(self) -> str: + """ + The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + """ + return pulumi.get(self, "algorithm") + + @property + @pulumi.getter(name="cryptoKey") + def crypto_key(self) -> str: + """ + The `id` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the + `kms.CryptoKey` resource/datasource. + """ + return pulumi.get(self, "crypto_key") + + @property + @pulumi.getter + def id(self) -> str: + return pulumi.get(self, "id") + + @property + @pulumi.getter + def name(self) -> str: + return pulumi.get(self, "name") + + @property + @pulumi.getter(name="protectionLevel") + def protection_level(self) -> str: + return pulumi.get(self, "protection_level") + + @property + @pulumi.getter(name="publicKeys") + def public_keys(self) -> Sequence['outputs.GetCryptoKeyVersionsVersionPublicKeyResult']: + return pulumi.get(self, "public_keys") + + @property + @pulumi.getter + def state(self) -> str: + return pulumi.get(self, "state") + + @property + @pulumi.getter + def version(self) -> int: + return pulumi.get(self, "version") + + +@pulumi.output_type +class GetCryptoKeyVersionsVersionPublicKeyResult(dict): + def __init__(__self__, *, + algorithm: str, + pem: str): + """ + :param str algorithm: The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + :param str pem: The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + """ + pulumi.set(__self__, "algorithm", algorithm) + pulumi.set(__self__, "pem", pem) + + @property + @pulumi.getter + def algorithm(self) -> str: + """ + The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. + """ + return pulumi.get(self, "algorithm") + + @property + @pulumi.getter + def pem(self) -> str: + """ + The public key, encoded in PEM format. For more information, see the RFC 7468 sections for General Considerations and Textual Encoding of Subject Public Key Info. + """ + return pulumi.get(self, "pem") + + @pulumi.output_type class GetCryptoKeysKeyResult(dict): def __init__(__self__, *, diff --git a/sdk/python/pulumi_gcp/netapp/active_directory.py b/sdk/python/pulumi_gcp/netapp/active_directory.py index 94811e28ce..44b948b434 100644 --- a/sdk/python/pulumi_gcp/netapp/active_directory.py +++ b/sdk/python/pulumi_gcp/netapp/active_directory.py @@ -850,7 +850,7 @@ def __init__(__self__, """ ActiveDirectory is the public representation of the active directory config. 
- To get more information about activeDirectory, see: + To get more information about ActiveDirectory, see: * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.activeDirectories) * How-to Guides @@ -900,7 +900,7 @@ def __init__(__self__, ## Import - activeDirectory can be imported using any of these accepted formats: + ActiveDirectory can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}` @@ -908,7 +908,7 @@ def __init__(__self__, * `{{location}}/{{name}}` - When using the `pulumi import` command, activeDirectory can be imported using one of the formats above. For example: + When using the `pulumi import` command, ActiveDirectory can be imported using one of the formats above. For example: ```sh $ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default projects/{{project}}/locations/{{location}}/activeDirectories/{{name}} @@ -966,7 +966,7 @@ def __init__(__self__, """ ActiveDirectory is the public representation of the active directory config. - To get more information about activeDirectory, see: + To get more information about ActiveDirectory, see: * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.activeDirectories) * How-to Guides @@ -1016,7 +1016,7 @@ def __init__(__self__, ## Import - activeDirectory can be imported using any of these accepted formats: + ActiveDirectory can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/activeDirectories/{{name}}` @@ -1024,7 +1024,7 @@ def __init__(__self__, * `{{location}}/{{name}}` - When using the `pulumi import` command, activeDirectory can be imported using one of the formats above. For example: + When using the `pulumi import` command, ActiveDirectory can be imported using one of the formats above. For example: ```sh $ pulumi import gcp:netapp/activeDirectory:ActiveDirectory default projects/{{project}}/locations/{{location}}/activeDirectories/{{name}} diff --git a/sdk/python/pulumi_gcp/netapp/backup.py b/sdk/python/pulumi_gcp/netapp/backup.py index e0c36cba1f..9c753a732e 100644 --- a/sdk/python/pulumi_gcp/netapp/backup.py +++ b/sdk/python/pulumi_gcp/netapp/backup.py @@ -468,7 +468,7 @@ def __init__(__self__, from a volume or from an existing volume snapshot. Scheduled backups require a backup policy. - To get more information about backup, see: + To get more information about Backup, see: * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults.backups) * How-to Guides @@ -512,7 +512,7 @@ def __init__(__self__, ## Import - backup can be imported using any of these accepted formats: + Backup can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}` @@ -520,7 +520,7 @@ def __init__(__self__, * `{{location}}/{{vault_name}}/{{name}}` - When using the `pulumi import` command, backup can be imported using one of the formats above. For example: + When using the `pulumi import` command, Backup can be imported using one of the formats above. For example: ```sh $ pulumi import gcp:netapp/backup:Backup default projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}} @@ -576,7 +576,7 @@ def __init__(__self__, from a volume or from an existing volume snapshot. Scheduled backups require a backup policy. 
- To get more information about backup, see: + To get more information about Backup, see: * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults.backups) * How-to Guides @@ -620,7 +620,7 @@ def __init__(__self__, ## Import - backup can be imported using any of these accepted formats: + Backup can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}` @@ -628,7 +628,7 @@ def __init__(__self__, * `{{location}}/{{vault_name}}/{{name}}` - When using the `pulumi import` command, backup can be imported using one of the formats above. For example: + When using the `pulumi import` command, Backup can be imported using one of the formats above. For example: ```sh $ pulumi import gcp:netapp/backup:Backup default projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}} diff --git a/sdk/python/pulumi_gcp/netapp/backup_policy.py b/sdk/python/pulumi_gcp/netapp/backup_policy.py index c9590ea053..aa8bf360bc 100644 --- a/sdk/python/pulumi_gcp/netapp/backup_policy.py +++ b/sdk/python/pulumi_gcp/netapp/backup_policy.py @@ -450,7 +450,7 @@ def __init__(__self__, Backup policies allow you to attach a backup schedule to a volume. The policy defines how many backups to retain at daily, weekly, or monthly intervals. - To get more information about backupPolicy, see: + To get more information about BackupPolicy, see: * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupPolicies) * How-to Guides @@ -479,7 +479,7 @@ def __init__(__self__, ## Import - backupPolicy can be imported using any of these accepted formats: + BackupPolicy can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}` @@ -487,7 +487,7 @@ def __init__(__self__, * `{{location}}/{{name}}` - When using the `pulumi import` command, backupPolicy can be imported using one of the formats above. For example: + When using the `pulumi import` command, BackupPolicy can be imported using one of the formats above. For example: ```sh $ pulumi import gcp:netapp/backupPolicy:BackupPolicy default projects/{{project}}/locations/{{location}}/backupPolicies/{{name}} @@ -532,7 +532,7 @@ def __init__(__self__, Backup policies allow you to attach a backup schedule to a volume. The policy defines how many backups to retain at daily, weekly, or monthly intervals. - To get more information about backupPolicy, see: + To get more information about BackupPolicy, see: * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupPolicies) * How-to Guides @@ -561,7 +561,7 @@ def __init__(__self__, ## Import - backupPolicy can be imported using any of these accepted formats: + BackupPolicy can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/backupPolicies/{{name}}` @@ -569,7 +569,7 @@ def __init__(__self__, * `{{location}}/{{name}}` - When using the `pulumi import` command, backupPolicy can be imported using one of the formats above. For example: + When using the `pulumi import` command, BackupPolicy can be imported using one of the formats above. 
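The Backup docstring above notes that scheduled backups require a backup policy. A hedged sketch of such a policy follows; the `daily_backup_limit`, `weekly_backup_limit`, `monthly_backup_limit`, and `enabled` argument names mirror the upstream `google_netapp_backup_policy` schema and are assumptions here.

```python
import pulumi
import pulumi_gcp as gcp

# Sketch only: retention-limit argument names mirror the upstream
# google_netapp_backup_policy schema and are assumptions.
policy = gcp.netapp.BackupPolicy(
    "example",
    name="example-backup-policy",
    location="us-central1",
    daily_backup_limit=2,    # keep the two most recent daily backups
    weekly_backup_limit=1,   # keep one weekly backup
    monthly_backup_limit=1,  # keep one monthly backup
    enabled=True,
)
```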
For example: ```sh $ pulumi import gcp:netapp/backupPolicy:BackupPolicy default projects/{{project}}/locations/{{location}}/backupPolicies/{{name}} diff --git a/sdk/python/pulumi_gcp/netapp/backup_vault.py b/sdk/python/pulumi_gcp/netapp/backup_vault.py index f7d78a0b52..c746744eb6 100644 --- a/sdk/python/pulumi_gcp/netapp/backup_vault.py +++ b/sdk/python/pulumi_gcp/netapp/backup_vault.py @@ -300,7 +300,7 @@ def __init__(__self__, A backup vault is the location where backups are stored. You can only create one backup vault per region. A vault can hold multiple backups for multiple volumes in that region. - To get more information about backupVault, see: + To get more information about BackupVault, see: * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults) * How-to Guides @@ -325,7 +325,7 @@ def __init__(__self__, ## Import - backupVault can be imported using any of these accepted formats: + BackupVault can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/backupVaults/{{name}}` @@ -333,7 +333,7 @@ def __init__(__self__, * `{{location}}/{{name}}` - When using the `pulumi import` command, backupVault can be imported using one of the formats above. For example: + When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: ```sh $ pulumi import gcp:netapp/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{name}} @@ -372,7 +372,7 @@ def __init__(__self__, A backup vault is the location where backups are stored. You can only create one backup vault per region. A vault can hold multiple backups for multiple volumes in that region. - To get more information about backupVault, see: + To get more information about BackupVault, see: * [API documentation](https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults) * How-to Guides @@ -397,7 +397,7 @@ def __init__(__self__, ## Import - backupVault can be imported using any of these accepted formats: + BackupVault can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/backupVaults/{{name}}` @@ -405,7 +405,7 @@ def __init__(__self__, * `{{location}}/{{name}}` - When using the `pulumi import` command, backupVault can be imported using one of the formats above. For example: + When using the `pulumi import` command, BackupVault can be imported using one of the formats above. For example: ```sh $ pulumi import gcp:netapp/backupVault:BackupVault default projects/{{project}}/locations/{{location}}/backupVaults/{{name}} diff --git a/sdk/python/pulumi_gcp/netapp/storage_pool.py b/sdk/python/pulumi_gcp/netapp/storage_pool.py index af5a48fdc1..afc37436bb 100644 --- a/sdk/python/pulumi_gcp/netapp/storage_pool.py +++ b/sdk/python/pulumi_gcp/netapp/storage_pool.py @@ -645,7 +645,7 @@ def __init__(__self__, ## Import - storagePool can be imported using any of these accepted formats: + StoragePool can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/storagePools/{{name}}` @@ -653,7 +653,7 @@ def __init__(__self__, * `{{location}}/{{name}}` - When using the `pulumi import` command, storagePool can be imported using one of the formats above. For example: + When using the `pulumi import` command, StoragePool can be imported using one of the formats above. 
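The BackupVault docstring above describes a per-region vault holding backups for multiple volumes. A hedged sketch of a vault plus a backup stored in it follows; the `vault_name` and `source_volume` argument names mirror the upstream `google_netapp_backup` schema and are assumptions, and the volume path is a placeholder.

```python
import pulumi
import pulumi_gcp as gcp

# Sketch only: one regional vault and a backup stored in it. `vault_name`
# and `source_volume` are assumed from the upstream schema; the volume path
# is a placeholder.
vault = gcp.netapp.BackupVault(
    "example",
    name="example-vault",
    location="us-central1",
)

backup = gcp.netapp.Backup(
    "example",
    name="example-backup",
    location="us-central1",
    vault_name=vault.name,
    source_volume="projects/my-project/locations/us-central1/volumes/my-volume",
)
```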
For example: ```sh $ pulumi import gcp:netapp/storagePool:StoragePool default projects/{{project}}/locations/{{location}}/storagePools/{{name}} @@ -747,7 +747,7 @@ def __init__(__self__, ## Import - storagePool can be imported using any of these accepted formats: + StoragePool can be imported using any of these accepted formats: * `projects/{{project}}/locations/{{location}}/storagePools/{{name}}` @@ -755,7 +755,7 @@ def __init__(__self__, * `{{location}}/{{name}}` - When using the `pulumi import` command, storagePool can be imported using one of the formats above. For example: + When using the `pulumi import` command, StoragePool can be imported using one of the formats above. For example: ```sh $ pulumi import gcp:netapp/storagePool:StoragePool default projects/{{project}}/locations/{{location}}/storagePools/{{name}} diff --git a/sdk/python/pulumi_gcp/netapp/volume.py b/sdk/python/pulumi_gcp/netapp/volume.py index 95327b1e33..03689a2441 100644 --- a/sdk/python/pulumi_gcp/netapp/volume.py +++ b/sdk/python/pulumi_gcp/netapp/volume.py @@ -54,6 +54,7 @@ def __init__(__self__, *, :param pulumi.Input[str] deletion_policy: Policy to determine if the volume should be deleted forcefully. Volumes may have nested snapshot resources. Deleting such a volume will fail. Setting this parameter to FORCE will delete volumes including nested snapshots. + Possible values: DEFAULT, FORCE. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input['VolumeExportPolicyArgs'] export_policy: Export policy of the volume for NFSV3 and/or NFSV4.1 access. Structure is documented below. @@ -200,6 +201,7 @@ def deletion_policy(self) -> Optional[pulumi.Input[str]]: Policy to determine if the volume should be deleted forcefully. Volumes may have nested snapshot resources. Deleting such a volume will fail. Setting this parameter to FORCE will delete volumes including nested snapshots. + Possible values: DEFAULT, FORCE. """ return pulumi.get(self, "deletion_policy") @@ -429,6 +431,7 @@ def __init__(__self__, *, :param pulumi.Input[str] deletion_policy: Policy to determine if the volume should be deleted forcefully. Volumes may have nested snapshot resources. Deleting such a volume will fail. Setting this parameter to FORCE will delete volumes including nested snapshots. + Possible values: DEFAULT, FORCE. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. :param pulumi.Input[str] encryption_type: Reports the data-at-rest encryption type of the volume. Inherited from storage pool. @@ -611,6 +614,7 @@ def deletion_policy(self) -> Optional[pulumi.Input[str]]: Policy to determine if the volume should be deleted forcefully. Volumes may have nested snapshot resources. Deleting such a volume will fail. Setting this parameter to FORCE will delete volumes including nested snapshots. + Possible values: DEFAULT, FORCE. """ return pulumi.get(self, "deletion_policy") @@ -1118,6 +1122,7 @@ def __init__(__self__, :param pulumi.Input[str] deletion_policy: Policy to determine if the volume should be deleted forcefully. Volumes may have nested snapshot resources. Deleting such a volume will fail. Setting this parameter to FORCE will delete volumes including nested snapshots. + Possible values: DEFAULT, FORCE. 
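For context on the StoragePool import formats above, here is a hedged sketch of the pool being imported; the `service_level`, `capacity_gib`, and `network` argument names mirror the upstream `google_netapp_storage_pool` schema and are assumptions, as is passing the capacity as a string of GiB.

```python
import pulumi
import pulumi_gcp as gcp

# Sketch only: argument names and the string-typed capacity mirror the
# upstream google_netapp_storage_pool schema and are assumptions.
default_network = gcp.compute.get_network(name="default")

pool = gcp.netapp.StoragePool(
    "example",
    name="example-pool",
    location="us-central1",
    service_level="PREMIUM",
    capacity_gib="2048",          # pool size in GiB
    network=default_network.id,
)
```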
:param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[Union['VolumeExportPolicyArgs', 'VolumeExportPolicyArgsDict']] export_policy: Export policy of the volume for NFSV3 and/or NFSV4.1 access. Structure is documented below. @@ -1376,6 +1381,7 @@ def get(resource_name: str, :param pulumi.Input[str] deletion_policy: Policy to determine if the volume should be deleted forcefully. Volumes may have nested snapshot resources. Deleting such a volume will fail. Setting this parameter to FORCE will delete volumes including nested snapshots. + Possible values: DEFAULT, FORCE. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. :param pulumi.Input[str] encryption_type: Reports the data-at-rest encryption type of the volume. Inherited from storage pool. @@ -1510,6 +1516,7 @@ def deletion_policy(self) -> pulumi.Output[Optional[str]]: Policy to determine if the volume should be deleted forcefully. Volumes may have nested snapshot resources. Deleting such a volume will fail. Setting this parameter to FORCE will delete volumes including nested snapshots. + Possible values: DEFAULT, FORCE. """ return pulumi.get(self, "deletion_policy") diff --git a/sdk/python/pulumi_gcp/networkconnectivity/_inputs.py b/sdk/python/pulumi_gcp/networkconnectivity/_inputs.py index 299e4a5883..16bdb71e2f 100644 --- a/sdk/python/pulumi_gcp/networkconnectivity/_inputs.py +++ b/sdk/python/pulumi_gcp/networkconnectivity/_inputs.py @@ -892,6 +892,10 @@ class SpokeLinkedVpcNetworkArgsDict(TypedDict): """ IP ranges encompassing the subnets to be excluded from peering. """ + include_export_ranges: NotRequired[pulumi.Input[Sequence[pulumi.Input[str]]]] + """ + IP ranges allowed to be included from peering. + """ elif False: SpokeLinkedVpcNetworkArgsDict: TypeAlias = Mapping[str, Any] @@ -899,14 +903,18 @@ class SpokeLinkedVpcNetworkArgsDict(TypedDict): class SpokeLinkedVpcNetworkArgs: def __init__(__self__, *, uri: pulumi.Input[str], - exclude_export_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): + exclude_export_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, + include_export_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None): """ :param pulumi.Input[str] uri: The URI of the VPC network resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] exclude_export_ranges: IP ranges encompassing the subnets to be excluded from peering. + :param pulumi.Input[Sequence[pulumi.Input[str]]] include_export_ranges: IP ranges allowed to be included from peering. """ pulumi.set(__self__, "uri", uri) if exclude_export_ranges is not None: pulumi.set(__self__, "exclude_export_ranges", exclude_export_ranges) + if include_export_ranges is not None: + pulumi.set(__self__, "include_export_ranges", include_export_ranges) @property @pulumi.getter @@ -932,6 +940,18 @@ def exclude_export_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[s def exclude_export_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "exclude_export_ranges", value) + @property + @pulumi.getter(name="includeExportRanges") + def include_export_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: + """ + IP ranges allowed to be included from peering. 
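The new `Possible values: DEFAULT, FORCE.` note on `deletion_policy` above applies to `gcp.netapp.Volume`. A hedged sketch using `FORCE`, so nested snapshots do not block deletion, follows; the `storage_pool`, `capacity_gib`, `share_name`, and `protocols` argument names mirror the upstream `google_netapp_volume` schema and are assumptions.

```python
import pulumi
import pulumi_gcp as gcp

# Sketch only: argument names mirror the upstream google_netapp_volume schema
# and are assumptions. FORCE allows deletion even if nested snapshot
# resources still exist, per the docstring above.
volume = gcp.netapp.Volume(
    "example",
    name="example-volume",
    location="us-central1",
    storage_pool="example-pool",   # name of an existing NetApp storage pool
    capacity_gib="100",            # volume size in GiB
    share_name="example-share",
    protocols=["NFSV3"],
    deletion_policy="FORCE",
)
```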
+ """ + return pulumi.get(self, "include_export_ranges") + + @include_export_ranges.setter + def include_export_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): + pulumi.set(self, "include_export_ranges", value) + if not MYPY: class SpokeLinkedVpnTunnelsArgsDict(TypedDict): diff --git a/sdk/python/pulumi_gcp/networkconnectivity/outputs.py b/sdk/python/pulumi_gcp/networkconnectivity/outputs.py index b47c6cb33f..74802fe14a 100644 --- a/sdk/python/pulumi_gcp/networkconnectivity/outputs.py +++ b/sdk/python/pulumi_gcp/networkconnectivity/outputs.py @@ -659,6 +659,8 @@ def __key_warning(key: str): suggest = None if key == "excludeExportRanges": suggest = "exclude_export_ranges" + elif key == "includeExportRanges": + suggest = "include_export_ranges" if suggest: pulumi.log.warn(f"Key '{key}' not found in SpokeLinkedVpcNetwork. Access the value via the '{suggest}' property getter instead.") @@ -673,14 +675,18 @@ def get(self, key: str, default = None) -> Any: def __init__(__self__, *, uri: str, - exclude_export_ranges: Optional[Sequence[str]] = None): + exclude_export_ranges: Optional[Sequence[str]] = None, + include_export_ranges: Optional[Sequence[str]] = None): """ :param str uri: The URI of the VPC network resource. :param Sequence[str] exclude_export_ranges: IP ranges encompassing the subnets to be excluded from peering. + :param Sequence[str] include_export_ranges: IP ranges allowed to be included from peering. """ pulumi.set(__self__, "uri", uri) if exclude_export_ranges is not None: pulumi.set(__self__, "exclude_export_ranges", exclude_export_ranges) + if include_export_ranges is not None: + pulumi.set(__self__, "include_export_ranges", include_export_ranges) @property @pulumi.getter @@ -698,6 +704,14 @@ def exclude_export_ranges(self) -> Optional[Sequence[str]]: """ return pulumi.get(self, "exclude_export_ranges") + @property + @pulumi.getter(name="includeExportRanges") + def include_export_ranges(self) -> Optional[Sequence[str]]: + """ + IP ranges allowed to be included from peering. + """ + return pulumi.get(self, "include_export_ranges") + @pulumi.output_type class SpokeLinkedVpnTunnels(dict): diff --git a/sdk/python/pulumi_gcp/networkconnectivity/spoke.py b/sdk/python/pulumi_gcp/networkconnectivity/spoke.py index 10e0980597..c7bf659b4f 100644 --- a/sdk/python/pulumi_gcp/networkconnectivity/spoke.py +++ b/sdk/python/pulumi_gcp/networkconnectivity/spoke.py @@ -545,6 +545,10 @@ def __init__(__self__, "198.51.100.0/24", "10.10.0.0/16", ], + "include_export_ranges": [ + "198.51.100.0/23", + "10.0.0.0/8", + ], "uri": network.self_link, }) ``` @@ -694,6 +698,10 @@ def __init__(__self__, "198.51.100.0/24", "10.10.0.0/16", ], + "include_export_ranges": [ + "198.51.100.0/23", + "10.0.0.0/8", + ], "uri": network.self_link, }) ``` diff --git a/sdk/python/pulumi_gcp/networksecurity/client_tls_policy.py b/sdk/python/pulumi_gcp/networksecurity/client_tls_policy.py index 481f50b64d..8b05b5614c 100644 --- a/sdk/python/pulumi_gcp/networksecurity/client_tls_policy.py +++ b/sdk/python/pulumi_gcp/networksecurity/client_tls_policy.py @@ -407,6 +407,14 @@ def __init__(__self__, sni: Optional[pulumi.Input[str]] = None, __props__=None): """ + ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. 
+ + To get more information about ClientTlsPolicy, see: + + * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.clientTlsPolicies) + * How-to Guides + * [Service Security](https://cloud.google.com/traffic-director/docs/security-use-cases) + ## Example Usage ### Network Security Client Tls Policy Basic @@ -440,18 +448,11 @@ def __init__(__self__, "plugin_instance": "google_cloud_private_spiffe", }, }, - server_validation_cas=[ - { - "grpc_endpoint": { - "target_uri": "unix:mypath", - }, - }, - { - "grpc_endpoint": { - "target_uri": "unix:mypath1", - }, + server_validation_cas=[{ + "grpc_endpoint": { + "target_uri": "unix:mypath", }, - ]) + }]) ``` ## Import @@ -505,6 +506,14 @@ def __init__(__self__, args: Optional[ClientTlsPolicyArgs] = None, opts: Optional[pulumi.ResourceOptions] = None): """ + ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. + + To get more information about ClientTlsPolicy, see: + + * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.clientTlsPolicies) + * How-to Guides + * [Service Security](https://cloud.google.com/traffic-director/docs/security-use-cases) + ## Example Usage ### Network Security Client Tls Policy Basic @@ -538,18 +547,11 @@ def __init__(__self__, "plugin_instance": "google_cloud_private_spiffe", }, }, - server_validation_cas=[ - { - "grpc_endpoint": { - "target_uri": "unix:mypath", - }, - }, - { - "grpc_endpoint": { - "target_uri": "unix:mypath1", - }, + server_validation_cas=[{ + "grpc_endpoint": { + "target_uri": "unix:mypath", }, - ]) + }]) ``` ## Import diff --git a/sdk/python/pulumi_gcp/networksecurity/server_tls_policy.py b/sdk/python/pulumi_gcp/networksecurity/server_tls_policy.py index 8d3ea34d21..284b8327da 100644 --- a/sdk/python/pulumi_gcp/networksecurity/server_tls_policy.py +++ b/sdk/python/pulumi_gcp/networksecurity/server_tls_policy.py @@ -419,6 +419,12 @@ def __init__(__self__, server_certificate: Optional[pulumi.Input[Union['ServerTlsPolicyServerCertificateArgs', 'ServerTlsPolicyServerCertificateArgsDict']]] = None, __props__=None): """ + ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. 
+ + To get more information about ServerTlsPolicy, see: + + * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.serverTlsPolicies) + ## Example Usage ### Network Security Server Tls Policy Basic @@ -440,23 +446,11 @@ def __init__(__self__, }, }, mtls_policy={ - "client_validation_cas": [ - { - "grpc_endpoint": { - "target_uri": "unix:mypath", - }, + "client_validation_cas": [{ + "grpc_endpoint": { + "target_uri": "unix:mypath", }, - { - "grpc_endpoint": { - "target_uri": "unix:abc/mypath", - }, - }, - { - "certificate_provider_instance": { - "plugin_instance": "google_cloud_private_spiffe", - }, - }, - ], + }], }) ``` ### Network Security Server Tls Policy Advanced @@ -588,6 +582,12 @@ def __init__(__self__, args: Optional[ServerTlsPolicyArgs] = None, opts: Optional[pulumi.ResourceOptions] = None): """ + ClientTlsPolicy is a resource that specifies how a client should authenticate connections to backends of a service. This resource itself does not affect configuration unless it is attached to a backend service resource. + + To get more information about ServerTlsPolicy, see: + + * [API documentation](https://cloud.google.com/traffic-director/docs/reference/network-security/rest/v1beta1/projects.locations.serverTlsPolicies) + ## Example Usage ### Network Security Server Tls Policy Basic @@ -609,23 +609,11 @@ def __init__(__self__, }, }, mtls_policy={ - "client_validation_cas": [ - { - "grpc_endpoint": { - "target_uri": "unix:mypath", - }, + "client_validation_cas": [{ + "grpc_endpoint": { + "target_uri": "unix:mypath", }, - { - "grpc_endpoint": { - "target_uri": "unix:abc/mypath", - }, - }, - { - "certificate_provider_instance": { - "plugin_instance": "google_cloud_private_spiffe", - }, - }, - ], + }], }) ``` ### Network Security Server Tls Policy Advanced diff --git a/sdk/python/pulumi_gcp/organizations/get_project.py b/sdk/python/pulumi_gcp/organizations/get_project.py index f866ed7eac..ea4c001124 100644 --- a/sdk/python/pulumi_gcp/organizations/get_project.py +++ b/sdk/python/pulumi_gcp/organizations/get_project.py @@ -26,7 +26,7 @@ class GetProjectResult: """ A collection of values returned by getProject. 
""" - def __init__(__self__, auto_create_network=None, billing_account=None, deletion_policy=None, effective_labels=None, folder_id=None, id=None, labels=None, name=None, number=None, org_id=None, project_id=None, pulumi_labels=None): + def __init__(__self__, auto_create_network=None, billing_account=None, deletion_policy=None, effective_labels=None, folder_id=None, id=None, labels=None, name=None, number=None, org_id=None, project_id=None, pulumi_labels=None, tags=None): if auto_create_network and not isinstance(auto_create_network, bool): raise TypeError("Expected argument 'auto_create_network' to be a bool") pulumi.set(__self__, "auto_create_network", auto_create_network) @@ -63,6 +63,9 @@ def __init__(__self__, auto_create_network=None, billing_account=None, deletion_ if pulumi_labels and not isinstance(pulumi_labels, dict): raise TypeError("Expected argument 'pulumi_labels' to be a dict") pulumi.set(__self__, "pulumi_labels", pulumi_labels) + if tags and not isinstance(tags, dict): + raise TypeError("Expected argument 'tags' to be a dict") + pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="autoCreateNetwork") @@ -130,6 +133,11 @@ def project_id(self) -> Optional[str]: def pulumi_labels(self) -> Mapping[str, str]: return pulumi.get(self, "pulumi_labels") + @property + @pulumi.getter + def tags(self) -> Mapping[str, str]: + return pulumi.get(self, "tags") + class AwaitableGetProjectResult(GetProjectResult): # pylint: disable=using-constant-test @@ -148,7 +156,8 @@ def __await__(self): number=self.number, org_id=self.org_id, project_id=self.project_id, - pulumi_labels=self.pulumi_labels) + pulumi_labels=self.pulumi_labels, + tags=self.tags) def get_project(project_id: Optional[str] = None, @@ -188,7 +197,8 @@ def get_project(project_id: Optional[str] = None, number=pulumi.get(__ret__, 'number'), org_id=pulumi.get(__ret__, 'org_id'), project_id=pulumi.get(__ret__, 'project_id'), - pulumi_labels=pulumi.get(__ret__, 'pulumi_labels')) + pulumi_labels=pulumi.get(__ret__, 'pulumi_labels'), + tags=pulumi.get(__ret__, 'tags')) @_utilities.lift_output_func(get_project) diff --git a/sdk/python/pulumi_gcp/organizations/project.py b/sdk/python/pulumi_gcp/organizations/project.py index a38e1544e1..574d1b133f 100644 --- a/sdk/python/pulumi_gcp/organizations/project.py +++ b/sdk/python/pulumi_gcp/organizations/project.py @@ -26,7 +26,8 @@ def __init__(__self__, *, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, - project_id: Optional[pulumi.Input[str]] = None): + project_id: Optional[pulumi.Input[str]] = None, + tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ The set of arguments for constructing a Project resource. :param pulumi.Input[bool] auto_create_network: Create the 'default' network automatically. Default true. If set to false, the default network will be deleted. Note @@ -53,6 +54,7 @@ def __init__(__self__, *, this forces the project to be migrated to the newly specified organization. :param pulumi.Input[str] project_id: The project ID. Changing this forces a new project to be created. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. 
The field is immutable and causes resource replacement when mutated. """ if auto_create_network is not None: pulumi.set(__self__, "auto_create_network", auto_create_network) @@ -70,6 +72,8 @@ def __init__(__self__, *, pulumi.set(__self__, "org_id", org_id) if project_id is not None: pulumi.set(__self__, "project_id", project_id) + if tags is not None: + pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="autoCreateNetwork") @@ -181,6 +185,18 @@ def project_id(self) -> Optional[pulumi.Input[str]]: def project_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project_id", value) + @property + @pulumi.getter + def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + """ + return pulumi.get(self, "tags") + + @tags.setter + def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "tags", value) + @pulumi.input_type class _ProjectState: @@ -195,7 +211,8 @@ def __init__(__self__, *, number: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, project_id: Optional[pulumi.Input[str]] = None, - pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): + pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Input properties used for looking up and filtering Project resources. :param pulumi.Input[bool] auto_create_network: Create the 'default' network automatically. Default true. If set to false, the default network will be deleted. Note @@ -225,6 +242,7 @@ def __init__(__self__, *, organization. :param pulumi.Input[str] project_id: The project ID. Changing this forces a new project to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource and default labels configured on the provider. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. """ if auto_create_network is not None: pulumi.set(__self__, "auto_create_network", auto_create_network) @@ -248,6 +266,8 @@ def __init__(__self__, *, pulumi.set(__self__, "project_id", project_id) if pulumi_labels is not None: pulumi.set(__self__, "pulumi_labels", pulumi_labels) + if tags is not None: + pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="autoCreateNetwork") @@ -395,6 +415,18 @@ def pulumi_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]] def pulumi_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "pulumi_labels", value) + @property + @pulumi.getter + def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: + """ + A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. 
Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. + """ + return pulumi.get(self, "tags") + + @tags.setter + def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): + pulumi.set(self, "tags", value) + class Project(pulumi.CustomResource): @overload @@ -409,6 +441,7 @@ def __init__(__self__, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, project_id: Optional[pulumi.Input[str]] = None, + tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): """ Allows creation and management of a Google Cloud Platform project. @@ -423,6 +456,10 @@ def __init__(__self__, > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. + > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. + + > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. + To get more information about projects, see: * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -456,6 +493,21 @@ def __init__(__self__, folder_id=department1.name) ``` + To create a project with a tag + + ```python + import pulumi + import pulumi_gcp as gcp + + my_project = gcp.organizations.Project("my_project", + name="My Project", + project_id="your-project-id", + org_id="1234567", + tags={ + "1234567/env": "staging", + }) + ``` + ## Import Projects can be imported using the `project_id`, e.g. @@ -494,6 +546,7 @@ def __init__(__self__, this forces the project to be migrated to the newly specified organization. :param pulumi.Input[str] project_id: The project ID. Changing this forces a new project to be created. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. """ ... @overload @@ -514,6 +567,10 @@ def __init__(__self__, > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. + > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. + + > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. 
+ To get more information about projects, see: * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -547,6 +604,21 @@ def __init__(__self__, folder_id=department1.name) ``` + To create a project with a tag + + ```python + import pulumi + import pulumi_gcp as gcp + + my_project = gcp.organizations.Project("my_project", + name="My Project", + project_id="your-project-id", + org_id="1234567", + tags={ + "1234567/env": "staging", + }) + ``` + ## Import Projects can be imported using the `project_id`, e.g. @@ -582,6 +654,7 @@ def _internal_init(__self__, name: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, project_id: Optional[pulumi.Input[str]] = None, + tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): @@ -599,6 +672,7 @@ def _internal_init(__self__, __props__.__dict__["name"] = name __props__.__dict__["org_id"] = org_id __props__.__dict__["project_id"] = project_id + __props__.__dict__["tags"] = tags __props__.__dict__["effective_labels"] = None __props__.__dict__["number"] = None __props__.__dict__["pulumi_labels"] = None @@ -624,7 +698,8 @@ def get(resource_name: str, number: Optional[pulumi.Input[str]] = None, org_id: Optional[pulumi.Input[str]] = None, project_id: Optional[pulumi.Input[str]] = None, - pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Project': + pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, + tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Project': """ Get an existing Project resource's state with the given name, id, and optional extra properties used to qualify the lookup. @@ -659,6 +734,7 @@ def get(resource_name: str, organization. :param pulumi.Input[str] project_id: The project ID. Changing this forces a new project to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource and default labels configured on the provider. + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) @@ -675,6 +751,7 @@ def get(resource_name: str, __props__.__dict__["org_id"] = org_id __props__.__dict__["project_id"] = project_id __props__.__dict__["pulumi_labels"] = pulumi_labels + __props__.__dict__["tags"] = tags return Project(resource_name, opts=opts, __props__=__props__) @property @@ -779,3 +856,11 @@ def pulumi_labels(self) -> pulumi.Output[Mapping[str, str]]: """ return pulumi.get(self, "pulumi_labels") + @property + @pulumi.getter + def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: + """ + A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored when empty. The field is immutable and causes resource replacement when mutated. 
+ """ + return pulumi.get(self, "tags") + diff --git a/sdk/python/pulumi_gcp/parallelstore/instance.py b/sdk/python/pulumi_gcp/parallelstore/instance.py index a082521a3c..de0fd1dbbd 100644 --- a/sdk/python/pulumi_gcp/parallelstore/instance.py +++ b/sdk/python/pulumi_gcp/parallelstore/instance.py @@ -59,12 +59,12 @@ def __init__(__self__, *, FILE_STRIPE_LEVEL_MIN FILE_STRIPE_LEVEL_BALANCED FILE_STRIPE_LEVEL_MAX - :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Cloud Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. Cloud Labels can be used to filter collections of - resources. They can be used to control how resource metrics are aggregated. - And they can be used as arguments to policy management rules (e.g. route, - firewall, load balancing, etc.). + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. route, firewall, + load balancing, etc.). * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `a-z{0,62}`. * Label values must be between 0 and 63 characters long and must conform @@ -75,19 +75,19 @@ def __init__(__self__, *, characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels - as the string: name + "_" + value would prove problematic if we were to - allow "_" in a future release. + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field `effective_labels` for all of the labels present on the resource. - :param pulumi.Input[str] network: Immutable. The name of the Google Compute Engine - [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - instance is connected. + :param pulumi.Input[str] network: Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + to which the instance is connected. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. - :param pulumi.Input[str] reserved_ip_range: Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. If no range id is provided all ranges will be - considered. + :param pulumi.Input[str] reserved_ip_range: Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. 
""" pulumi.set(__self__, "capacity_gib", capacity_gib) pulumi.set(__self__, "instance_id", instance_id) @@ -205,12 +205,12 @@ def file_stripe_level(self, value: Optional[pulumi.Input[str]]): @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ - Cloud Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. Cloud Labels can be used to filter collections of - resources. They can be used to control how resource metrics are aggregated. - And they can be used as arguments to policy management rules (e.g. route, - firewall, load balancing, etc.). + Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. route, firewall, + load balancing, etc.). * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `a-z{0,62}`. * Label values must be between 0 and 63 characters long and must conform @@ -221,8 +221,9 @@ def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels - as the string: name + "_" + value would prove problematic if we were to - allow "_" in a future release. + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field `effective_labels` for all of the labels present on the resource. """ @@ -236,9 +237,8 @@ def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]) @pulumi.getter def network(self) -> Optional[pulumi.Input[str]]: """ - Immutable. The name of the Google Compute Engine - [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - instance is connected. + Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + to which the instance is connected. """ return pulumi.get(self, "network") @@ -263,10 +263,10 @@ def project(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="reservedIpRange") def reserved_ip_range(self) -> Optional[pulumi.Input[str]]: """ - Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. If no range id is provided all ranges will be - considered. + Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. """ return pulumi.get(self, "reserved_ip_range") @@ -303,7 +303,7 @@ def __init__(__self__, *, Contains a list of IPv4 addresses used for client side configuration. :param pulumi.Input[str] capacity_gib: Required. Immutable. Storage capacity of Parallelstore instance in Gibibytes (GiB). 
:param pulumi.Input[str] create_time: The time when the instance was created. - :param pulumi.Input[str] daos_version: The version of DAOS software running in the instance + :param pulumi.Input[str] daos_version: The version of DAOS software running in the instance. :param pulumi.Input[str] description: The description of the instance. 2048 characters or less. :param pulumi.Input[str] directory_stripe_level: Stripe level for directories. MIN when directory has a small number of files. @@ -314,9 +314,9 @@ def __init__(__self__, *, DIRECTORY_STRIPE_LEVEL_BALANCED DIRECTORY_STRIPE_LEVEL_MAX :param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. - :param pulumi.Input[str] effective_reserved_ip_range: Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. This field is populated by the service and + :param pulumi.Input[str] effective_reserved_ip_range: Immutable. Contains the id of the allocated IP address + range associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. This field is populated by the service and contains the value currently used by the service. :param pulumi.Input[str] file_stripe_level: Stripe level for files. MIN better suited for small size files. @@ -335,12 +335,12 @@ def __init__(__self__, *, - - - - :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Cloud Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. Cloud Labels can be used to filter collections of - resources. They can be used to control how resource metrics are aggregated. - And they can be used as arguments to policy management rules (e.g. route, - firewall, load balancing, etc.). + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. route, firewall, + load balancing, etc.). * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `a-z{0,62}`. * Label values must be between 0 and 63 characters long and must conform @@ -351,24 +351,24 @@ def __init__(__self__, *, characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels - as the string: name + "_" + value would prove problematic if we were to - allow "_" in a future release. + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field `effective_labels` for all of the labels present on the resource. :param pulumi.Input[str] location: Part of `parent`. See documentation of `projectsId`. :param pulumi.Input[str] name: Identifier. 
The resource name of the instance, in the format `projects/{project}/locations/{location}/instances/{instance_id}` - :param pulumi.Input[str] network: Immutable. The name of the Google Compute Engine - [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - instance is connected. + :param pulumi.Input[str] network: Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + to which the instance is connected. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource and default labels configured on the provider. - :param pulumi.Input[str] reserved_ip_range: Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. If no range id is provided all ranges will be - considered. + :param pulumi.Input[str] reserved_ip_range: Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. :param pulumi.Input[str] state: The instance state. Possible values: STATE_UNSPECIFIED @@ -459,7 +459,7 @@ def create_time(self, value: Optional[pulumi.Input[str]]): @pulumi.getter(name="daosVersion") def daos_version(self) -> Optional[pulumi.Input[str]]: """ - The version of DAOS software running in the instance + The version of DAOS software running in the instance. """ return pulumi.get(self, "daos_version") @@ -514,9 +514,9 @@ def effective_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Inpu @pulumi.getter(name="effectiveReservedIpRange") def effective_reserved_ip_range(self) -> Optional[pulumi.Input[str]]: """ - Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. This field is populated by the service and + Immutable. Contains the id of the allocated IP address + range associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. This field is populated by the service and contains the value currently used by the service. """ return pulumi.get(self, "effective_reserved_ip_range") @@ -568,12 +568,12 @@ def instance_id(self, value: Optional[pulumi.Input[str]]): @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ - Cloud Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. Cloud Labels can be used to filter collections of - resources. They can be used to control how resource metrics are aggregated. - And they can be used as arguments to policy management rules (e.g. route, - firewall, load balancing, etc.). + Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. 
route, firewall, + load balancing, etc.). * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `a-z{0,62}`. * Label values must be between 0 and 63 characters long and must conform @@ -584,8 +584,9 @@ def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels - as the string: name + "_" + value would prove problematic if we were to - allow "_" in a future release. + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field `effective_labels` for all of the labels present on the resource. """ @@ -624,9 +625,8 @@ def name(self, value: Optional[pulumi.Input[str]]): @pulumi.getter def network(self) -> Optional[pulumi.Input[str]]: """ - Immutable. The name of the Google Compute Engine - [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - instance is connected. + Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + to which the instance is connected. """ return pulumi.get(self, "network") @@ -664,10 +664,10 @@ def pulumi_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[s @pulumi.getter(name="reservedIpRange") def reserved_ip_range(self) -> Optional[pulumi.Input[str]]: """ - Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. If no range id is provided all ranges will be - considered. + Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. """ return pulumi.get(self, "reserved_ip_range") @@ -815,12 +815,12 @@ def __init__(__self__, - - - - :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Cloud Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. Cloud Labels can be used to filter collections of - resources. They can be used to control how resource metrics are aggregated. - And they can be used as arguments to policy management rules (e.g. route, - firewall, load balancing, etc.). + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. route, firewall, + load balancing, etc.). * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `a-z{0,62}`. * Label values must be between 0 and 63 characters long and must conform @@ -831,20 +831,20 @@ def __init__(__self__, characters may be allowed in the future. 
Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels - as the string: name + "_" + value would prove problematic if we were to - allow "_" in a future release. + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field `effective_labels` for all of the labels present on the resource. :param pulumi.Input[str] location: Part of `parent`. See documentation of `projectsId`. - :param pulumi.Input[str] network: Immutable. The name of the Google Compute Engine - [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - instance is connected. + :param pulumi.Input[str] network: Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + to which the instance is connected. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. - :param pulumi.Input[str] reserved_ip_range: Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. If no range id is provided all ranges will be - considered. + :param pulumi.Input[str] reserved_ip_range: Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. """ ... @overload @@ -1016,7 +1016,7 @@ def get(resource_name: str, Contains a list of IPv4 addresses used for client side configuration. :param pulumi.Input[str] capacity_gib: Required. Immutable. Storage capacity of Parallelstore instance in Gibibytes (GiB). :param pulumi.Input[str] create_time: The time when the instance was created. - :param pulumi.Input[str] daos_version: The version of DAOS software running in the instance + :param pulumi.Input[str] daos_version: The version of DAOS software running in the instance. :param pulumi.Input[str] description: The description of the instance. 2048 characters or less. :param pulumi.Input[str] directory_stripe_level: Stripe level for directories. MIN when directory has a small number of files. @@ -1027,9 +1027,9 @@ def get(resource_name: str, DIRECTORY_STRIPE_LEVEL_BALANCED DIRECTORY_STRIPE_LEVEL_MAX :param pulumi.Input[Mapping[str, pulumi.Input[str]]] effective_labels: All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients and services. - :param pulumi.Input[str] effective_reserved_ip_range: Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. This field is populated by the service and + :param pulumi.Input[str] effective_reserved_ip_range: Immutable. Contains the id of the allocated IP address + range associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. This field is populated by the service and contains the value currently used by the service. :param pulumi.Input[str] file_stripe_level: Stripe level for files. 
MIN better suited for small size files. @@ -1048,12 +1048,12 @@ def get(resource_name: str, - - - - :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Cloud Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. Cloud Labels can be used to filter collections of - resources. They can be used to control how resource metrics are aggregated. - And they can be used as arguments to policy management rules (e.g. route, - firewall, load balancing, etc.). + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. route, firewall, + load balancing, etc.). * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `a-z{0,62}`. * Label values must be between 0 and 63 characters long and must conform @@ -1064,24 +1064,24 @@ def get(resource_name: str, characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels - as the string: name + "_" + value would prove problematic if we were to - allow "_" in a future release. + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field `effective_labels` for all of the labels present on the resource. :param pulumi.Input[str] location: Part of `parent`. See documentation of `projectsId`. :param pulumi.Input[str] name: Identifier. The resource name of the instance, in the format `projects/{project}/locations/{location}/instances/{instance_id}` - :param pulumi.Input[str] network: Immutable. The name of the Google Compute Engine - [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - instance is connected. + :param pulumi.Input[str] network: Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + to which the instance is connected. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource and default labels configured on the provider. - :param pulumi.Input[str] reserved_ip_range: Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. If no range id is provided all ranges will be - considered. + :param pulumi.Input[str] reserved_ip_range: Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. :param pulumi.Input[str] state: The instance state. 
Possible values: STATE_UNSPECIFIED @@ -1146,7 +1146,7 @@ def create_time(self) -> pulumi.Output[str]: @pulumi.getter(name="daosVersion") def daos_version(self) -> pulumi.Output[str]: """ - The version of DAOS software running in the instance + The version of DAOS software running in the instance. """ return pulumi.get(self, "daos_version") @@ -1185,9 +1185,9 @@ def effective_labels(self) -> pulumi.Output[Mapping[str, str]]: @pulumi.getter(name="effectiveReservedIpRange") def effective_reserved_ip_range(self) -> pulumi.Output[str]: """ - Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. This field is populated by the service and + Immutable. Contains the id of the allocated IP address + range associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. This field is populated by the service and contains the value currently used by the service. """ return pulumi.get(self, "effective_reserved_ip_range") @@ -1227,12 +1227,12 @@ def instance_id(self) -> pulumi.Output[str]: @pulumi.getter def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ - Cloud Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. Cloud Labels can be used to filter collections of - resources. They can be used to control how resource metrics are aggregated. - And they can be used as arguments to policy management rules (e.g. route, - firewall, load balancing, etc.). + Cloud Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a customer's organizational + needs and deployment strategies. Cloud Labels can be used to filter collections + of resources. They can be used to control how resource metrics are aggregated. + And they can be used as arguments to policy management rules (e.g. route, firewall, + load balancing, etc.). * Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `a-z{0,62}`. * Label values must be between 0 and 63 characters long and must conform @@ -1243,8 +1243,9 @@ def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]: characters may be allowed in the future. Therefore, you are advised to use an internal label representation, such as JSON, which doesn't rely upon specific characters being disallowed. For example, representing labels - as the string: name + "_" + value would prove problematic if we were to - allow "_" in a future release. + as the string: `name + "_" + value` would prove problematic if we were to + allow `"_"` in a future release. " + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer to the field `effective_labels` for all of the labels present on the resource. """ @@ -1271,9 +1272,8 @@ def name(self) -> pulumi.Output[str]: @pulumi.getter def network(self) -> pulumi.Output[Optional[str]]: """ - Immutable. The name of the Google Compute Engine - [VPC network](https://cloud.google.com/vpc/docs/vpc) to which the - instance is connected. + Immutable. The name of the Google Compute Engine [VPC network](https://cloud.google.com/vpc/docs/vpc) + to which the instance is connected. 
""" return pulumi.get(self, "network") @@ -1299,10 +1299,10 @@ def pulumi_labels(self) -> pulumi.Output[Mapping[str, str]]: @pulumi.getter(name="reservedIpRange") def reserved_ip_range(self) -> pulumi.Output[Optional[str]]: """ - Immutable. Contains the id of the allocated IP address range associated with the - private service access connection for example, "test-default" associated - with IP range 10.0.0.0/29. If no range id is provided all ranges will be - considered. + Immutable. Contains the id of the allocated IP address range + associated with the private service access connection for example, \\"test-default\\" + associated with IP range 10.0.0.0/29. If no range id is provided all ranges will + be considered. """ return pulumi.get(self, "reserved_ip_range") diff --git a/sdk/python/pulumi_gcp/projects/iam_member_remove.py b/sdk/python/pulumi_gcp/projects/iam_member_remove.py index 778857cf38..c37933b79b 100644 --- a/sdk/python/pulumi_gcp/projects/iam_member_remove.py +++ b/sdk/python/pulumi_gcp/projects/iam_member_remove.py @@ -175,6 +175,19 @@ def __init__(__self__, and [API reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). + ## Example Usage + + ```python + import pulumi + import pulumi_gcp as gcp + + target_project = gcp.organizations.get_project() + foo = gcp.projects.IamMemberRemove("foo", + role="roles/editor", + project=target_project_google_project["projectId"], + member=f"serviceAccount:{target_project_google_project['number']}-compute@developer.gserviceaccount.com") + ``` + :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] member: The IAM principal that should not have the target role. @@ -213,6 +226,19 @@ def __init__(__self__, and [API reference](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). + ## Example Usage + + ```python + import pulumi + import pulumi_gcp as gcp + + target_project = gcp.organizations.get_project() + foo = gcp.projects.IamMemberRemove("foo", + role="roles/editor", + project=target_project_google_project["projectId"], + member=f"serviceAccount:{target_project_google_project['number']}-compute@developer.gserviceaccount.com") + ``` + :param str resource_name: The name of the resource. :param IamMemberRemoveArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. diff --git a/sdk/python/pulumi_gcp/projects/usage_export_bucket.py b/sdk/python/pulumi_gcp/projects/usage_export_bucket.py index 5619f85f25..9802788b03 100644 --- a/sdk/python/pulumi_gcp/projects/usage_export_bucket.py +++ b/sdk/python/pulumi_gcp/projects/usage_export_bucket.py @@ -149,6 +149,10 @@ def __init__(__self__, > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. + > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. + + > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. 
+ To get more information about projects, see: * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -182,6 +186,21 @@ def __init__(__self__, folder_id=department1.name) ``` + To create a project with a tag + + ```python + import pulumi + import pulumi_gcp as gcp + + my_project = gcp.organizations.Project("my_project", + name="My Project", + project_id="your-project-id", + org_id="1234567", + tags={ + "1234567/env": "staging", + }) + ``` + ## Import Projects can be imported using the `project_id`, e.g. @@ -219,6 +238,10 @@ def __init__(__self__, > This resource reads the specified billing account on every pulumi up and plan operation so you must have permissions on the specified billing account. + > It is recommended to use the `constraints/compute.skipDefaultNetworkCreation` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html) to remove the default network instead of setting `auto_create_network` to false, when possible. + + > It may take a while for the attached tag bindings to be deleted after the project is scheduled to be deleted. + To get more information about projects, see: * [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects) @@ -252,6 +275,21 @@ def __init__(__self__, folder_id=department1.name) ``` + To create a project with a tag + + ```python + import pulumi + import pulumi_gcp as gcp + + my_project = gcp.organizations.Project("my_project", + name="My Project", + project_id="your-project-id", + org_id="1234567", + tags={ + "1234567/env": "staging", + }) + ``` + ## Import Projects can be imported using the `project_id`, e.g. diff --git a/sdk/python/pulumi_gcp/pubsub/_inputs.py b/sdk/python/pulumi_gcp/pubsub/_inputs.py index e41fcc2e28..dc44186f5a 100644 --- a/sdk/python/pulumi_gcp/pubsub/_inputs.py +++ b/sdk/python/pulumi_gcp/pubsub/_inputs.py @@ -583,6 +583,10 @@ class SubscriptionCloudStorageConfigArgsDict(TypedDict): May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". """ + max_messages: NotRequired[pulumi.Input[int]] + """ + The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + """ service_account_email: NotRequired[pulumi.Input[str]] """ The service account to use to write to Cloud Storage. If not specified, the Pub/Sub @@ -607,6 +611,7 @@ def __init__(__self__, *, filename_suffix: Optional[pulumi.Input[str]] = None, max_bytes: Optional[pulumi.Input[int]] = None, max_duration: Optional[pulumi.Input[str]] = None, + max_messages: Optional[pulumi.Input[int]] = None, service_account_email: Optional[pulumi.Input[str]] = None, state: Optional[pulumi.Input[str]] = None): """ @@ -621,6 +626,7 @@ def __init__(__self__, *, :param pulumi.Input[str] max_duration: The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + :param pulumi.Input[int] max_messages: The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. :param pulumi.Input[str] service_account_email: The service account to use to write to Cloud Storage. 
If not specified, the Pub/Sub [service agent](https://cloud.google.com/iam/docs/service-agents), service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -640,6 +646,8 @@ def __init__(__self__, *, pulumi.set(__self__, "max_bytes", max_bytes) if max_duration is not None: pulumi.set(__self__, "max_duration", max_duration) + if max_messages is not None: + pulumi.set(__self__, "max_messages", max_messages) if service_account_email is not None: pulumi.set(__self__, "service_account_email", service_account_email) if state is not None: @@ -733,6 +741,18 @@ def max_duration(self) -> Optional[pulumi.Input[str]]: def max_duration(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "max_duration", value) + @property + @pulumi.getter(name="maxMessages") + def max_messages(self) -> Optional[pulumi.Input[int]]: + """ + The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + """ + return pulumi.get(self, "max_messages") + + @max_messages.setter + def max_messages(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "max_messages", value) + @property @pulumi.getter(name="serviceAccountEmail") def service_account_email(self) -> Optional[pulumi.Input[str]]: @@ -763,6 +783,10 @@ def state(self, value: Optional[pulumi.Input[str]]): if not MYPY: class SubscriptionCloudStorageConfigAvroConfigArgsDict(TypedDict): + use_topic_schema: NotRequired[pulumi.Input[bool]] + """ + When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + """ write_metadata: NotRequired[pulumi.Input[bool]] """ When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. @@ -773,13 +797,29 @@ class SubscriptionCloudStorageConfigAvroConfigArgsDict(TypedDict): @pulumi.input_type class SubscriptionCloudStorageConfigAvroConfigArgs: def __init__(__self__, *, + use_topic_schema: Optional[pulumi.Input[bool]] = None, write_metadata: Optional[pulumi.Input[bool]] = None): """ + :param pulumi.Input[bool] use_topic_schema: When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. :param pulumi.Input[bool] write_metadata: When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. """ + if use_topic_schema is not None: + pulumi.set(__self__, "use_topic_schema", use_topic_schema) if write_metadata is not None: pulumi.set(__self__, "write_metadata", write_metadata) + @property + @pulumi.getter(name="useTopicSchema") + def use_topic_schema(self) -> Optional[pulumi.Input[bool]]: + """ + When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. 
+ """ + return pulumi.get(self, "use_topic_schema") + + @use_topic_schema.setter + def use_topic_schema(self, value: Optional[pulumi.Input[bool]]): + pulumi.set(self, "use_topic_schema", value) + @property @pulumi.getter(name="writeMetadata") def write_metadata(self) -> Optional[pulumi.Input[bool]]: diff --git a/sdk/python/pulumi_gcp/pubsub/outputs.py b/sdk/python/pulumi_gcp/pubsub/outputs.py index fec09fa1ac..7d79b0165b 100644 --- a/sdk/python/pulumi_gcp/pubsub/outputs.py +++ b/sdk/python/pulumi_gcp/pubsub/outputs.py @@ -453,6 +453,8 @@ def __key_warning(key: str): suggest = "max_bytes" elif key == "maxDuration": suggest = "max_duration" + elif key == "maxMessages": + suggest = "max_messages" elif key == "serviceAccountEmail": suggest = "service_account_email" @@ -475,6 +477,7 @@ def __init__(__self__, *, filename_suffix: Optional[str] = None, max_bytes: Optional[int] = None, max_duration: Optional[str] = None, + max_messages: Optional[int] = None, service_account_email: Optional[str] = None, state: Optional[str] = None): """ @@ -489,6 +492,7 @@ def __init__(__self__, *, :param str max_duration: The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + :param int max_messages: The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. :param str service_account_email: The service account to use to write to Cloud Storage. If not specified, the Pub/Sub [service agent](https://cloud.google.com/iam/docs/service-agents), service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -508,6 +512,8 @@ def __init__(__self__, *, pulumi.set(__self__, "max_bytes", max_bytes) if max_duration is not None: pulumi.set(__self__, "max_duration", max_duration) + if max_messages is not None: + pulumi.set(__self__, "max_messages", max_messages) if service_account_email is not None: pulumi.set(__self__, "service_account_email", service_account_email) if state is not None: @@ -573,6 +579,14 @@ def max_duration(self) -> Optional[str]: """ return pulumi.get(self, "max_duration") + @property + @pulumi.getter(name="maxMessages") + def max_messages(self) -> Optional[int]: + """ + The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + """ + return pulumi.get(self, "max_messages") + @property @pulumi.getter(name="serviceAccountEmail") def service_account_email(self) -> Optional[str]: @@ -598,7 +612,9 @@ class SubscriptionCloudStorageConfigAvroConfig(dict): @staticmethod def __key_warning(key: str): suggest = None - if key == "writeMetadata": + if key == "useTopicSchema": + suggest = "use_topic_schema" + elif key == "writeMetadata": suggest = "write_metadata" if suggest: @@ -613,13 +629,25 @@ def get(self, key: str, default = None) -> Any: return super().get(key, default) def __init__(__self__, *, + use_topic_schema: Optional[bool] = None, write_metadata: Optional[bool] = None): """ + :param bool use_topic_schema: When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. :param bool write_metadata: When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. 
""" + if use_topic_schema is not None: + pulumi.set(__self__, "use_topic_schema", use_topic_schema) if write_metadata is not None: pulumi.set(__self__, "write_metadata", write_metadata) + @property + @pulumi.getter(name="useTopicSchema") + def use_topic_schema(self) -> Optional[bool]: + """ + When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. + """ + return pulumi.get(self, "use_topic_schema") + @property @pulumi.getter(name="writeMetadata") def write_metadata(self) -> Optional[bool]: @@ -1428,6 +1456,7 @@ def __init__(__self__, *, filename_suffix: str, max_bytes: int, max_duration: str, + max_messages: int, service_account_email: str, state: str): """ @@ -1441,6 +1470,7 @@ def __init__(__self__, *, :param str max_duration: The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + :param int max_messages: The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. :param str service_account_email: The service account to use to write to Cloud Storage. If not specified, the Pub/Sub [service agent](https://cloud.google.com/iam/docs/service-agents), service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. @@ -1453,6 +1483,7 @@ def __init__(__self__, *, pulumi.set(__self__, "filename_suffix", filename_suffix) pulumi.set(__self__, "max_bytes", max_bytes) pulumi.set(__self__, "max_duration", max_duration) + pulumi.set(__self__, "max_messages", max_messages) pulumi.set(__self__, "service_account_email", service_account_email) pulumi.set(__self__, "state", state) @@ -1515,6 +1546,14 @@ def max_duration(self) -> str: """ return pulumi.get(self, "max_duration") + @property + @pulumi.getter(name="maxMessages") + def max_messages(self) -> int: + """ + The maximum messages that can be written to a Cloud Storage file before a new file is created. Min 1000 messages. + """ + return pulumi.get(self, "max_messages") + @property @pulumi.getter(name="serviceAccountEmail") def service_account_email(self) -> str: @@ -1537,12 +1576,23 @@ def state(self) -> str: @pulumi.output_type class GetSubscriptionCloudStorageConfigAvroConfigResult(dict): def __init__(__self__, *, + use_topic_schema: bool, write_metadata: bool): """ + :param bool use_topic_schema: When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. :param bool write_metadata: When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. """ + pulumi.set(__self__, "use_topic_schema", use_topic_schema) pulumi.set(__self__, "write_metadata", write_metadata) + @property + @pulumi.getter(name="useTopicSchema") + def use_topic_schema(self) -> bool: + """ + When true, the output Cloud Storage file will be serialized using the topic schema, if it exists. 
+ """ + return pulumi.get(self, "use_topic_schema") + @property @pulumi.getter(name="writeMetadata") def write_metadata(self) -> bool: diff --git a/sdk/python/pulumi_gcp/pubsub/subscription.py b/sdk/python/pulumi_gcp/pubsub/subscription.py index 492e4d6149..5df737e993 100644 --- a/sdk/python/pulumi_gcp/pubsub/subscription.py +++ b/sdk/python/pulumi_gcp/pubsub/subscription.py @@ -1123,6 +1123,7 @@ def __init__(__self__, "filename_datetime_format": "YYYY-MM-DD/hh_mm_ssZ", "max_bytes": 1000, "max_duration": "300s", + "max_messages": 1000, }, opts = pulumi.ResourceOptions(depends_on=[ example, @@ -1155,8 +1156,10 @@ def __init__(__self__, "filename_datetime_format": "YYYY-MM-DD/hh_mm_ssZ", "max_bytes": 1000, "max_duration": "300s", + "max_messages": 1000, "avro_config": { "write_metadata": True, + "use_topic_schema": True, }, }, opts = pulumi.ResourceOptions(depends_on=[ @@ -1566,6 +1569,7 @@ def __init__(__self__, "filename_datetime_format": "YYYY-MM-DD/hh_mm_ssZ", "max_bytes": 1000, "max_duration": "300s", + "max_messages": 1000, }, opts = pulumi.ResourceOptions(depends_on=[ example, @@ -1598,8 +1602,10 @@ def __init__(__self__, "filename_datetime_format": "YYYY-MM-DD/hh_mm_ssZ", "max_bytes": 1000, "max_duration": "300s", + "max_messages": 1000, "avro_config": { "write_metadata": True, + "use_topic_schema": True, }, }, opts = pulumi.ResourceOptions(depends_on=[ diff --git a/sdk/python/pulumi_gcp/redis/_inputs.py b/sdk/python/pulumi_gcp/redis/_inputs.py index e5217791db..1e41c842f5 100644 --- a/sdk/python/pulumi_gcp/redis/_inputs.py +++ b/sdk/python/pulumi_gcp/redis/_inputs.py @@ -19,6 +19,14 @@ 'ClusterDiscoveryEndpointArgsDict', 'ClusterDiscoveryEndpointPscConfigArgs', 'ClusterDiscoveryEndpointPscConfigArgsDict', + 'ClusterMaintenancePolicyArgs', + 'ClusterMaintenancePolicyArgsDict', + 'ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs', + 'ClusterMaintenancePolicyWeeklyMaintenanceWindowArgsDict', + 'ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs', + 'ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgsDict', + 'ClusterMaintenanceScheduleArgs', + 'ClusterMaintenanceScheduleArgsDict', 'ClusterPscConfigArgs', 'ClusterPscConfigArgsDict', 'ClusterPscConnectionArgs', @@ -157,6 +165,417 @@ def network(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network", value) +if not MYPY: + class ClusterMaintenancePolicyArgsDict(TypedDict): + create_time: NotRequired[pulumi.Input[str]] + """ + (Output) + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + update_time: NotRequired[pulumi.Input[str]] + """ + (Output) + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + weekly_maintenance_windows: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowArgsDict']]]] + """ + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + Structure is documented below. 
+ """ +elif False: + ClusterMaintenancePolicyArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class ClusterMaintenancePolicyArgs: + def __init__(__self__, *, + create_time: Optional[pulumi.Input[str]] = None, + update_time: Optional[pulumi.Input[str]] = None, + weekly_maintenance_windows: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs']]]] = None): + """ + :param pulumi.Input[str] create_time: (Output) + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + :param pulumi.Input[str] update_time: (Output) + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + :param pulumi.Input[Sequence[pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs']]] weekly_maintenance_windows: Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + Structure is documented below. + """ + if create_time is not None: + pulumi.set(__self__, "create_time", create_time) + if update_time is not None: + pulumi.set(__self__, "update_time", update_time) + if weekly_maintenance_windows is not None: + pulumi.set(__self__, "weekly_maintenance_windows", weekly_maintenance_windows) + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> Optional[pulumi.Input[str]]: + """ + (Output) + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + return pulumi.get(self, "create_time") + + @create_time.setter + def create_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "create_time", value) + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> Optional[pulumi.Input[str]]: + """ + (Output) + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + return pulumi.get(self, "update_time") + + @update_time.setter + def update_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "update_time", value) + + @property + @pulumi.getter(name="weeklyMaintenanceWindows") + def weekly_maintenance_windows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs']]]]: + """ + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + Structure is documented below. + """ + return pulumi.get(self, "weekly_maintenance_windows") + + @weekly_maintenance_windows.setter + def weekly_maintenance_windows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs']]]]): + pulumi.set(self, "weekly_maintenance_windows", value) + + +if not MYPY: + class ClusterMaintenancePolicyWeeklyMaintenanceWindowArgsDict(TypedDict): + day: pulumi.Input[str] + """ + Required. The day of week that maintenance updates occur. + - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. 
+ - MONDAY: Monday + - TUESDAY: Tuesday + - WEDNESDAY: Wednesday + - THURSDAY: Thursday + - FRIDAY: Friday + - SATURDAY: Saturday + - SUNDAY: Sunday + Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + """ + start_time: pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgsDict'] + """ + Required. Start time of the window in UTC time. + Structure is documented below. + """ + duration: NotRequired[pulumi.Input[str]] + """ + (Output) + Output only. Duration of the maintenance window. + The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + """ +elif False: + ClusterMaintenancePolicyWeeklyMaintenanceWindowArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs: + def __init__(__self__, *, + day: pulumi.Input[str], + start_time: pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs'], + duration: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] day: Required. The day of week that maintenance updates occur. + - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + - MONDAY: Monday + - TUESDAY: Tuesday + - WEDNESDAY: Wednesday + - THURSDAY: Thursday + - FRIDAY: Friday + - SATURDAY: Saturday + - SUNDAY: Sunday + Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + :param pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs'] start_time: Required. Start time of the window in UTC time. + Structure is documented below. + :param pulumi.Input[str] duration: (Output) + Output only. Duration of the maintenance window. + The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + """ + pulumi.set(__self__, "day", day) + pulumi.set(__self__, "start_time", start_time) + if duration is not None: + pulumi.set(__self__, "duration", duration) + + @property + @pulumi.getter + def day(self) -> pulumi.Input[str]: + """ + Required. The day of week that maintenance updates occur. + - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + - MONDAY: Monday + - TUESDAY: Tuesday + - WEDNESDAY: Wednesday + - THURSDAY: Thursday + - FRIDAY: Friday + - SATURDAY: Saturday + - SUNDAY: Sunday + Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + """ + return pulumi.get(self, "day") + + @day.setter + def day(self, value: pulumi.Input[str]): + pulumi.set(self, "day", value) + + @property + @pulumi.getter(name="startTime") + def start_time(self) -> pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs']: + """ + Required. Start time of the window in UTC time. + Structure is documented below. + """ + return pulumi.get(self, "start_time") + + @start_time.setter + def start_time(self, value: pulumi.Input['ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs']): + pulumi.set(self, "start_time", value) + + @property + @pulumi.getter + def duration(self) -> Optional[pulumi.Input[str]]: + """ + (Output) + Output only. Duration of the maintenance window. + The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". 
+ """ + return pulumi.get(self, "duration") + + @duration.setter + def duration(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "duration", value) + + +if not MYPY: + class ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgsDict(TypedDict): + hours: NotRequired[pulumi.Input[int]] + """ + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + """ + minutes: NotRequired[pulumi.Input[int]] + """ + Minutes of hour of day. Must be from 0 to 59. + """ + nanos: NotRequired[pulumi.Input[int]] + """ + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + """ + seconds: NotRequired[pulumi.Input[int]] + """ + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + """ +elif False: + ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs: + def __init__(__self__, *, + hours: Optional[pulumi.Input[int]] = None, + minutes: Optional[pulumi.Input[int]] = None, + nanos: Optional[pulumi.Input[int]] = None, + seconds: Optional[pulumi.Input[int]] = None): + """ + :param pulumi.Input[int] hours: Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + :param pulumi.Input[int] minutes: Minutes of hour of day. Must be from 0 to 59. + :param pulumi.Input[int] nanos: Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + :param pulumi.Input[int] seconds: Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + """ + if hours is not None: + pulumi.set(__self__, "hours", hours) + if minutes is not None: + pulumi.set(__self__, "minutes", minutes) + if nanos is not None: + pulumi.set(__self__, "nanos", nanos) + if seconds is not None: + pulumi.set(__self__, "seconds", seconds) + + @property + @pulumi.getter + def hours(self) -> Optional[pulumi.Input[int]]: + """ + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + """ + return pulumi.get(self, "hours") + + @hours.setter + def hours(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "hours", value) + + @property + @pulumi.getter + def minutes(self) -> Optional[pulumi.Input[int]]: + """ + Minutes of hour of day. Must be from 0 to 59. + """ + return pulumi.get(self, "minutes") + + @minutes.setter + def minutes(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "minutes", value) + + @property + @pulumi.getter + def nanos(self) -> Optional[pulumi.Input[int]]: + """ + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + """ + return pulumi.get(self, "nanos") + + @nanos.setter + def nanos(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "nanos", value) + + @property + @pulumi.getter + def seconds(self) -> Optional[pulumi.Input[int]]: + """ + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. 
+ """ + return pulumi.get(self, "seconds") + + @seconds.setter + def seconds(self, value: Optional[pulumi.Input[int]]): + pulumi.set(self, "seconds", value) + + +if not MYPY: + class ClusterMaintenanceScheduleArgsDict(TypedDict): + end_time: NotRequired[pulumi.Input[str]] + """ + (Output) + Output only. The end time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + schedule_deadline_time: NotRequired[pulumi.Input[str]] + """ + (Output) + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + start_time: NotRequired[pulumi.Input[str]] + """ + (Output) + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ +elif False: + ClusterMaintenanceScheduleArgsDict: TypeAlias = Mapping[str, Any] + +@pulumi.input_type +class ClusterMaintenanceScheduleArgs: + def __init__(__self__, *, + end_time: Optional[pulumi.Input[str]] = None, + schedule_deadline_time: Optional[pulumi.Input[str]] = None, + start_time: Optional[pulumi.Input[str]] = None): + """ + :param pulumi.Input[str] end_time: (Output) + Output only. The end time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + :param pulumi.Input[str] schedule_deadline_time: (Output) + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + :param pulumi.Input[str] start_time: (Output) + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + if end_time is not None: + pulumi.set(__self__, "end_time", end_time) + if schedule_deadline_time is not None: + pulumi.set(__self__, "schedule_deadline_time", schedule_deadline_time) + if start_time is not None: + pulumi.set(__self__, "start_time", start_time) + + @property + @pulumi.getter(name="endTime") + def end_time(self) -> Optional[pulumi.Input[str]]: + """ + (Output) + Output only. The end time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + return pulumi.get(self, "end_time") + + @end_time.setter + def end_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "end_time", value) + + @property + @pulumi.getter(name="scheduleDeadlineTime") + def schedule_deadline_time(self) -> Optional[pulumi.Input[str]]: + """ + (Output) + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. 
+ """ + return pulumi.get(self, "schedule_deadline_time") + + @schedule_deadline_time.setter + def schedule_deadline_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "schedule_deadline_time", value) + + @property + @pulumi.getter(name="startTime") + def start_time(self) -> Optional[pulumi.Input[str]]: + """ + (Output) + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + return pulumi.get(self, "start_time") + + @start_time.setter + def start_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "start_time", value) + + if not MYPY: class ClusterPscConfigArgsDict(TypedDict): network: pulumi.Input[str] diff --git a/sdk/python/pulumi_gcp/redis/cluster.py b/sdk/python/pulumi_gcp/redis/cluster.py index c3e30e8ff4..c2db20bef8 100644 --- a/sdk/python/pulumi_gcp/redis/cluster.py +++ b/sdk/python/pulumi_gcp/redis/cluster.py @@ -25,6 +25,7 @@ def __init__(__self__, *, shard_count: pulumi.Input[int], authorization_mode: Optional[pulumi.Input[str]] = None, deletion_protection_enabled: Optional[pulumi.Input[bool]] = None, + maintenance_policy: Optional[pulumi.Input['ClusterMaintenancePolicyArgs']] = None, name: Optional[pulumi.Input[str]] = None, node_type: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, @@ -45,6 +46,7 @@ def __init__(__self__, *, "AUTH_MODE_DISABLED"] :param pulumi.Input[bool] deletion_protection_enabled: Optional. Indicates if the cluster is deletion protected or not. If the value if set to true, any delete cluster operation will fail. Default value is true. + :param pulumi.Input['ClusterMaintenancePolicyArgs'] maintenance_policy: Maintenance policy for a cluster :param pulumi.Input[str] name: Unique name of the resource in this scope including project and location using the form: projects/{projectId}/locations/{locationId}/clusters/{clusterId} :param pulumi.Input[str] node_type: The nodeType for the Redis cluster. 
If not provided, REDIS_HIGHMEM_MEDIUM will be used as default Possible values: @@ -65,6 +67,8 @@ def __init__(__self__, *, pulumi.set(__self__, "authorization_mode", authorization_mode) if deletion_protection_enabled is not None: pulumi.set(__self__, "deletion_protection_enabled", deletion_protection_enabled) + if maintenance_policy is not None: + pulumi.set(__self__, "maintenance_policy", maintenance_policy) if name is not None: pulumi.set(__self__, "name", name) if node_type is not None: @@ -136,6 +140,18 @@ def deletion_protection_enabled(self) -> Optional[pulumi.Input[bool]]: def deletion_protection_enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "deletion_protection_enabled", value) + @property + @pulumi.getter(name="maintenancePolicy") + def maintenance_policy(self) -> Optional[pulumi.Input['ClusterMaintenancePolicyArgs']]: + """ + Maintenance policy for a cluster + """ + return pulumi.get(self, "maintenance_policy") + + @maintenance_policy.setter + def maintenance_policy(self, value: Optional[pulumi.Input['ClusterMaintenancePolicyArgs']]): + pulumi.set(self, "maintenance_policy", value) + @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: @@ -243,6 +259,8 @@ def __init__(__self__, *, create_time: Optional[pulumi.Input[str]] = None, deletion_protection_enabled: Optional[pulumi.Input[bool]] = None, discovery_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterDiscoveryEndpointArgs']]]] = None, + maintenance_policy: Optional[pulumi.Input['ClusterMaintenancePolicyArgs']] = None, + maintenance_schedules: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMaintenanceScheduleArgs']]]] = None, name: Optional[pulumi.Input[str]] = None, node_type: Optional[pulumi.Input[str]] = None, precise_size_gb: Optional[pulumi.Input[float]] = None, @@ -273,6 +291,9 @@ def __init__(__self__, *, for Redis clients to connect to the cluster. Currently only one endpoint is supported. Structure is documented below. + :param pulumi.Input['ClusterMaintenancePolicyArgs'] maintenance_policy: Maintenance policy for a cluster + :param pulumi.Input[Sequence[pulumi.Input['ClusterMaintenanceScheduleArgs']]] maintenance_schedules: Upcoming maintenance schedule. + Structure is documented below. :param pulumi.Input[str] name: Unique name of the resource in this scope including project and location using the form: projects/{projectId}/locations/{locationId}/clusters/{clusterId} :param pulumi.Input[str] node_type: The nodeType for the Redis cluster. 
If not provided, REDIS_HIGHMEM_MEDIUM will be used as default Possible values: @@ -308,6 +329,10 @@ def __init__(__self__, *, pulumi.set(__self__, "deletion_protection_enabled", deletion_protection_enabled) if discovery_endpoints is not None: pulumi.set(__self__, "discovery_endpoints", discovery_endpoints) + if maintenance_policy is not None: + pulumi.set(__self__, "maintenance_policy", maintenance_policy) + if maintenance_schedules is not None: + pulumi.set(__self__, "maintenance_schedules", maintenance_schedules) if name is not None: pulumi.set(__self__, "name", name) if node_type is not None: @@ -397,6 +422,31 @@ def discovery_endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['Cl def discovery_endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterDiscoveryEndpointArgs']]]]): pulumi.set(self, "discovery_endpoints", value) + @property + @pulumi.getter(name="maintenancePolicy") + def maintenance_policy(self) -> Optional[pulumi.Input['ClusterMaintenancePolicyArgs']]: + """ + Maintenance policy for a cluster + """ + return pulumi.get(self, "maintenance_policy") + + @maintenance_policy.setter + def maintenance_policy(self, value: Optional[pulumi.Input['ClusterMaintenancePolicyArgs']]): + pulumi.set(self, "maintenance_policy", value) + + @property + @pulumi.getter(name="maintenanceSchedules") + def maintenance_schedules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMaintenanceScheduleArgs']]]]: + """ + Upcoming maintenance schedule. + Structure is documented below. + """ + return pulumi.get(self, "maintenance_schedules") + + @maintenance_schedules.setter + def maintenance_schedules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterMaintenanceScheduleArgs']]]]): + pulumi.set(self, "maintenance_schedules", value) + @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: @@ -605,6 +655,7 @@ def __init__(__self__, opts: Optional[pulumi.ResourceOptions] = None, authorization_mode: Optional[pulumi.Input[str]] = None, deletion_protection_enabled: Optional[pulumi.Input[bool]] = None, + maintenance_policy: Optional[pulumi.Input[Union['ClusterMaintenancePolicyArgs', 'ClusterMaintenancePolicyArgsDict']]] = None, name: Optional[pulumi.Input[str]] = None, node_type: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, @@ -668,6 +719,17 @@ def __init__(__self__, zone_distribution_config={ "mode": "MULTI_ZONE", }, + maintenance_policy={ + "weekly_maintenance_windows": [{ + "day": "MONDAY", + "start_time": { + "hours": 1, + "minutes": 0, + "seconds": 0, + "nanos": 0, + }, + }], + }, opts = pulumi.ResourceOptions(depends_on=[default])) ``` ### Redis Cluster Ha Single Zone @@ -704,6 +766,17 @@ def __init__(__self__, "mode": "SINGLE_ZONE", "zone": "us-central1-f", }, + maintenance_policy={ + "weekly_maintenance_windows": [{ + "day": "MONDAY", + "start_time": { + "hours": 1, + "minutes": 0, + "seconds": 0, + "nanos": 0, + }, + }], + }, deletion_protection_enabled=True, opts = pulumi.ResourceOptions(depends_on=[default])) ``` @@ -745,6 +818,7 @@ def __init__(__self__, "AUTH_MODE_DISABLED"] :param pulumi.Input[bool] deletion_protection_enabled: Optional. Indicates if the cluster is deletion protected or not. If the value if set to true, any delete cluster operation will fail. Default value is true. 
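Pulling the pieces of the examples above together, a condensed sketch of a cluster that opts into the new `maintenance_policy` input and exposes the read-only `maintenance_schedules` as a stack output. The region, shard count, and the network self-link inside `psc_configs` are placeholders; a VPC and a service connection policy are assumed to exist already, as in the fuller generated examples.

```python
import pulumi
import pulumi_gcp as gcp

cluster = gcp.redis.Cluster(
    "cluster-ha",
    region="us-central1",  # placeholder region
    shard_count=3,
    psc_configs=[{
        # Assumes an existing VPC with a service connection policy for Memorystore.
        "network": "projects/my-project/global/networks/my-network",
    }],
    maintenance_policy={
        "weekly_maintenance_windows": [{
            "day": "MONDAY",
            "start_time": {"hours": 1, "minutes": 0, "seconds": 0, "nanos": 0},
        }],
    },
)

# maintenance_schedules is output-only: the service populates the start/end
# times and schedule_deadline_time once a maintenance event is planned.
pulumi.export("upcoming_maintenance", cluster.maintenance_schedules)
```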
+ :param pulumi.Input[Union['ClusterMaintenancePolicyArgs', 'ClusterMaintenancePolicyArgsDict']] maintenance_policy: Maintenance policy for a cluster :param pulumi.Input[str] name: Unique name of the resource in this scope including project and location using the form: projects/{projectId}/locations/{locationId}/clusters/{clusterId} :param pulumi.Input[str] node_type: The nodeType for the Redis cluster. If not provided, REDIS_HIGHMEM_MEDIUM will be used as default Possible values: @@ -822,6 +896,17 @@ def __init__(__self__, zone_distribution_config={ "mode": "MULTI_ZONE", }, + maintenance_policy={ + "weekly_maintenance_windows": [{ + "day": "MONDAY", + "start_time": { + "hours": 1, + "minutes": 0, + "seconds": 0, + "nanos": 0, + }, + }], + }, opts = pulumi.ResourceOptions(depends_on=[default])) ``` ### Redis Cluster Ha Single Zone @@ -858,6 +943,17 @@ def __init__(__self__, "mode": "SINGLE_ZONE", "zone": "us-central1-f", }, + maintenance_policy={ + "weekly_maintenance_windows": [{ + "day": "MONDAY", + "start_time": { + "hours": 1, + "minutes": 0, + "seconds": 0, + "nanos": 0, + }, + }], + }, deletion_protection_enabled=True, opts = pulumi.ResourceOptions(depends_on=[default])) ``` @@ -909,6 +1005,7 @@ def _internal_init(__self__, opts: Optional[pulumi.ResourceOptions] = None, authorization_mode: Optional[pulumi.Input[str]] = None, deletion_protection_enabled: Optional[pulumi.Input[bool]] = None, + maintenance_policy: Optional[pulumi.Input[Union['ClusterMaintenancePolicyArgs', 'ClusterMaintenancePolicyArgsDict']]] = None, name: Optional[pulumi.Input[str]] = None, node_type: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, @@ -930,6 +1027,7 @@ def _internal_init(__self__, __props__.__dict__["authorization_mode"] = authorization_mode __props__.__dict__["deletion_protection_enabled"] = deletion_protection_enabled + __props__.__dict__["maintenance_policy"] = maintenance_policy __props__.__dict__["name"] = name __props__.__dict__["node_type"] = node_type __props__.__dict__["project"] = project @@ -946,6 +1044,7 @@ def _internal_init(__self__, __props__.__dict__["zone_distribution_config"] = zone_distribution_config __props__.__dict__["create_time"] = None __props__.__dict__["discovery_endpoints"] = None + __props__.__dict__["maintenance_schedules"] = None __props__.__dict__["precise_size_gb"] = None __props__.__dict__["psc_connections"] = None __props__.__dict__["size_gb"] = None @@ -966,6 +1065,8 @@ def get(resource_name: str, create_time: Optional[pulumi.Input[str]] = None, deletion_protection_enabled: Optional[pulumi.Input[bool]] = None, discovery_endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterDiscoveryEndpointArgs', 'ClusterDiscoveryEndpointArgsDict']]]]] = None, + maintenance_policy: Optional[pulumi.Input[Union['ClusterMaintenancePolicyArgs', 'ClusterMaintenancePolicyArgsDict']]] = None, + maintenance_schedules: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ClusterMaintenanceScheduleArgs', 'ClusterMaintenanceScheduleArgsDict']]]]] = None, name: Optional[pulumi.Input[str]] = None, node_type: Optional[pulumi.Input[str]] = None, precise_size_gb: Optional[pulumi.Input[float]] = None, @@ -1001,6 +1102,9 @@ def get(resource_name: str, for Redis clients to connect to the cluster. Currently only one endpoint is supported. Structure is documented below. 
+ :param pulumi.Input[Union['ClusterMaintenancePolicyArgs', 'ClusterMaintenancePolicyArgsDict']] maintenance_policy: Maintenance policy for a cluster + :param pulumi.Input[Sequence[pulumi.Input[Union['ClusterMaintenanceScheduleArgs', 'ClusterMaintenanceScheduleArgsDict']]]] maintenance_schedules: Upcoming maintenance schedule. + Structure is documented below. :param pulumi.Input[str] name: Unique name of the resource in this scope including project and location using the form: projects/{projectId}/locations/{locationId}/clusters/{clusterId} :param pulumi.Input[str] node_type: The nodeType for the Redis cluster. If not provided, REDIS_HIGHMEM_MEDIUM will be used as default Possible values: @@ -1036,6 +1140,8 @@ def get(resource_name: str, __props__.__dict__["create_time"] = create_time __props__.__dict__["deletion_protection_enabled"] = deletion_protection_enabled __props__.__dict__["discovery_endpoints"] = discovery_endpoints + __props__.__dict__["maintenance_policy"] = maintenance_policy + __props__.__dict__["maintenance_schedules"] = maintenance_schedules __props__.__dict__["name"] = name __props__.__dict__["node_type"] = node_type __props__.__dict__["precise_size_gb"] = precise_size_gb @@ -1094,6 +1200,23 @@ def discovery_endpoints(self) -> pulumi.Output[Sequence['outputs.ClusterDiscover """ return pulumi.get(self, "discovery_endpoints") + @property + @pulumi.getter(name="maintenancePolicy") + def maintenance_policy(self) -> pulumi.Output[Optional['outputs.ClusterMaintenancePolicy']]: + """ + Maintenance policy for a cluster + """ + return pulumi.get(self, "maintenance_policy") + + @property + @pulumi.getter(name="maintenanceSchedules") + def maintenance_schedules(self) -> pulumi.Output[Sequence['outputs.ClusterMaintenanceSchedule']]: + """ + Upcoming maintenance schedule. + Structure is documented below. + """ + return pulumi.get(self, "maintenance_schedules") + @property @pulumi.getter def name(self) -> pulumi.Output[str]: diff --git a/sdk/python/pulumi_gcp/redis/outputs.py b/sdk/python/pulumi_gcp/redis/outputs.py index 6b2b19bbd8..0aa9ccfa94 100644 --- a/sdk/python/pulumi_gcp/redis/outputs.py +++ b/sdk/python/pulumi_gcp/redis/outputs.py @@ -18,6 +18,10 @@ __all__ = [ 'ClusterDiscoveryEndpoint', 'ClusterDiscoveryEndpointPscConfig', + 'ClusterMaintenancePolicy', + 'ClusterMaintenancePolicyWeeklyMaintenanceWindow', + 'ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime', + 'ClusterMaintenanceSchedule', 'ClusterPscConfig', 'ClusterPscConnection', 'ClusterStateInfo', @@ -122,6 +126,317 @@ def network(self) -> Optional[str]: return pulumi.get(self, "network") +@pulumi.output_type +class ClusterMaintenancePolicy(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "createTime": + suggest = "create_time" + elif key == "updateTime": + suggest = "update_time" + elif key == "weeklyMaintenanceWindows": + suggest = "weekly_maintenance_windows" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ClusterMaintenancePolicy. 
Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ClusterMaintenancePolicy.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ClusterMaintenancePolicy.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + create_time: Optional[str] = None, + update_time: Optional[str] = None, + weekly_maintenance_windows: Optional[Sequence['outputs.ClusterMaintenancePolicyWeeklyMaintenanceWindow']] = None): + """ + :param str create_time: (Output) + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + :param str update_time: (Output) + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + :param Sequence['ClusterMaintenancePolicyWeeklyMaintenanceWindowArgs'] weekly_maintenance_windows: Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + Structure is documented below. + """ + if create_time is not None: + pulumi.set(__self__, "create_time", create_time) + if update_time is not None: + pulumi.set(__self__, "update_time", update_time) + if weekly_maintenance_windows is not None: + pulumi.set(__self__, "weekly_maintenance_windows", weekly_maintenance_windows) + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> Optional[str]: + """ + (Output) + Output only. The time when the policy was created. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + return pulumi.get(self, "create_time") + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> Optional[str]: + """ + (Output) + Output only. The time when the policy was last updated. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + return pulumi.get(self, "update_time") + + @property + @pulumi.getter(name="weeklyMaintenanceWindows") + def weekly_maintenance_windows(self) -> Optional[Sequence['outputs.ClusterMaintenancePolicyWeeklyMaintenanceWindow']]: + """ + Optional. Maintenance window that is applied to resources covered by this policy. + Minimum 1. For the current version, the maximum number + of weekly_window is expected to be one. + Structure is documented below. + """ + return pulumi.get(self, "weekly_maintenance_windows") + + +@pulumi.output_type +class ClusterMaintenancePolicyWeeklyMaintenanceWindow(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "startTime": + suggest = "start_time" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ClusterMaintenancePolicyWeeklyMaintenanceWindow. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ClusterMaintenancePolicyWeeklyMaintenanceWindow.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ClusterMaintenancePolicyWeeklyMaintenanceWindow.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + day: str, + start_time: 'outputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime', + duration: Optional[str] = None): + """ + :param str day: Required. 
The day of week that maintenance updates occur. + - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + - MONDAY: Monday + - TUESDAY: Tuesday + - WEDNESDAY: Wednesday + - THURSDAY: Thursday + - FRIDAY: Friday + - SATURDAY: Saturday + - SUNDAY: Sunday + Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + :param 'ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTimeArgs' start_time: Required. Start time of the window in UTC time. + Structure is documented below. + :param str duration: (Output) + Output only. Duration of the maintenance window. + The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + """ + pulumi.set(__self__, "day", day) + pulumi.set(__self__, "start_time", start_time) + if duration is not None: + pulumi.set(__self__, "duration", duration) + + @property + @pulumi.getter + def day(self) -> str: + """ + Required. The day of week that maintenance updates occur. + - DAY_OF_WEEK_UNSPECIFIED: The day of the week is unspecified. + - MONDAY: Monday + - TUESDAY: Tuesday + - WEDNESDAY: Wednesday + - THURSDAY: Thursday + - FRIDAY: Friday + - SATURDAY: Saturday + - SUNDAY: Sunday + Possible values are: `DAY_OF_WEEK_UNSPECIFIED`, `MONDAY`, `TUESDAY`, `WEDNESDAY`, `THURSDAY`, `FRIDAY`, `SATURDAY`, `SUNDAY`. + """ + return pulumi.get(self, "day") + + @property + @pulumi.getter(name="startTime") + def start_time(self) -> 'outputs.ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime': + """ + Required. Start time of the window in UTC time. + Structure is documented below. + """ + return pulumi.get(self, "start_time") + + @property + @pulumi.getter + def duration(self) -> Optional[str]: + """ + (Output) + Output only. Duration of the maintenance window. + The current window is fixed at 1 hour. + A duration in seconds with up to nine fractional digits, + terminated by 's'. Example: "3.5s". + """ + return pulumi.get(self, "duration") + + +@pulumi.output_type +class ClusterMaintenancePolicyWeeklyMaintenanceWindowStartTime(dict): + def __init__(__self__, *, + hours: Optional[int] = None, + minutes: Optional[int] = None, + nanos: Optional[int] = None, + seconds: Optional[int] = None): + """ + :param int hours: Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + :param int minutes: Minutes of hour of day. Must be from 0 to 59. + :param int nanos: Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + :param int seconds: Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + """ + if hours is not None: + pulumi.set(__self__, "hours", hours) + if minutes is not None: + pulumi.set(__self__, "minutes", minutes) + if nanos is not None: + pulumi.set(__self__, "nanos", nanos) + if seconds is not None: + pulumi.set(__self__, "seconds", seconds) + + @property + @pulumi.getter + def hours(self) -> Optional[int]: + """ + Hours of day in 24 hour format. Should be from 0 to 23. + An API may choose to allow the value "24:00:00" for scenarios like business closing time. + """ + return pulumi.get(self, "hours") + + @property + @pulumi.getter + def minutes(self) -> Optional[int]: + """ + Minutes of hour of day. Must be from 0 to 59. 
+ """ + return pulumi.get(self, "minutes") + + @property + @pulumi.getter + def nanos(self) -> Optional[int]: + """ + Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + """ + return pulumi.get(self, "nanos") + + @property + @pulumi.getter + def seconds(self) -> Optional[int]: + """ + Seconds of minutes of the time. Must normally be from 0 to 59. + An API may allow the value 60 if it allows leap-seconds. + """ + return pulumi.get(self, "seconds") + + +@pulumi.output_type +class ClusterMaintenanceSchedule(dict): + @staticmethod + def __key_warning(key: str): + suggest = None + if key == "endTime": + suggest = "end_time" + elif key == "scheduleDeadlineTime": + suggest = "schedule_deadline_time" + elif key == "startTime": + suggest = "start_time" + + if suggest: + pulumi.log.warn(f"Key '{key}' not found in ClusterMaintenanceSchedule. Access the value via the '{suggest}' property getter instead.") + + def __getitem__(self, key: str) -> Any: + ClusterMaintenanceSchedule.__key_warning(key) + return super().__getitem__(key) + + def get(self, key: str, default = None) -> Any: + ClusterMaintenanceSchedule.__key_warning(key) + return super().get(key, default) + + def __init__(__self__, *, + end_time: Optional[str] = None, + schedule_deadline_time: Optional[str] = None, + start_time: Optional[str] = None): + """ + :param str end_time: (Output) + Output only. The end time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + :param str schedule_deadline_time: (Output) + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + :param str start_time: (Output) + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + if end_time is not None: + pulumi.set(__self__, "end_time", end_time) + if schedule_deadline_time is not None: + pulumi.set(__self__, "schedule_deadline_time", schedule_deadline_time) + if start_time is not None: + pulumi.set(__self__, "start_time", start_time) + + @property + @pulumi.getter(name="endTime") + def end_time(self) -> Optional[str]: + """ + (Output) + Output only. The end time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + return pulumi.get(self, "end_time") + + @property + @pulumi.getter(name="scheduleDeadlineTime") + def schedule_deadline_time(self) -> Optional[str]: + """ + (Output) + Output only. The deadline that the maintenance schedule start time + can not go beyond, including reschedule. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. + """ + return pulumi.get(self, "schedule_deadline_time") + + @property + @pulumi.getter(name="startTime") + def start_time(self) -> Optional[str]: + """ + (Output) + Output only. The start time of any upcoming scheduled maintenance for this cluster. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond + resolution and up to nine fractional digits. 
+ """ + return pulumi.get(self, "start_time") + + @pulumi.output_type class ClusterPscConfig(dict): def __init__(__self__, *, diff --git a/sdk/python/pulumi_gcp/securitycenter/__init__.py b/sdk/python/pulumi_gcp/securitycenter/__init__.py index 1c914a36af..2ebd540979 100644 --- a/sdk/python/pulumi_gcp/securitycenter/__init__.py +++ b/sdk/python/pulumi_gcp/securitycenter/__init__.py @@ -28,6 +28,7 @@ from .source_iam_policy import * from .v2_folder_mute_config import * from .v2_folder_notification_config import * +from .v2_folder_scc_big_query_export import * from .v2_organization_mute_config import * from .v2_organization_notification_config import * from .v2_organization_scc_big_query_exports import * @@ -37,5 +38,6 @@ from .v2_organization_source_iam_policy import * from .v2_project_mute_config import * from .v2_project_notification_config import * +from .v2_project_scc_big_query_export import * from ._inputs import * from . import outputs diff --git a/sdk/python/pulumi_gcp/securitycenter/v2_folder_scc_big_query_export.py b/sdk/python/pulumi_gcp/securitycenter/v2_folder_scc_big_query_export.py new file mode 100644 index 0000000000..860aeb9eaf --- /dev/null +++ b/sdk/python/pulumi_gcp/securitycenter/v2_folder_scc_big_query_export.py @@ -0,0 +1,857 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import sys +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +if sys.version_info >= (3, 11): + from typing import NotRequired, TypedDict, TypeAlias +else: + from typing_extensions import NotRequired, TypedDict, TypeAlias +from .. import _utilities + +__all__ = ['V2FolderSccBigQueryExportArgs', 'V2FolderSccBigQueryExport'] + +@pulumi.input_type +class V2FolderSccBigQueryExportArgs: + def __init__(__self__, *, + big_query_export_id: pulumi.Input[str], + folder: pulumi.Input[str], + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None): + """ + The set of arguments for constructing a V2FolderSccBigQueryExport resource. + :param pulumi.Input[str] big_query_export_id: This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + + + - - - + :param pulumi.Input[str] folder: The folder where Cloud Security Command Center Big Query Export + Config lives in. + :param pulumi.Input[str] dataset: The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). + :param pulumi.Input[str] filter: Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. 
+ The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + :param pulumi.Input[str] location: The BigQuery export configuration is stored in this location. If not provided, Use global as default. + """ + pulumi.set(__self__, "big_query_export_id", big_query_export_id) + pulumi.set(__self__, "folder", folder) + if dataset is not None: + pulumi.set(__self__, "dataset", dataset) + if description is not None: + pulumi.set(__self__, "description", description) + if filter is not None: + pulumi.set(__self__, "filter", filter) + if location is not None: + pulumi.set(__self__, "location", location) + + @property + @pulumi.getter(name="bigQueryExportId") + def big_query_export_id(self) -> pulumi.Input[str]: + """ + This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + + + - - - + """ + return pulumi.get(self, "big_query_export_id") + + @big_query_export_id.setter + def big_query_export_id(self, value: pulumi.Input[str]): + pulumi.set(self, "big_query_export_id", value) + + @property + @pulumi.getter + def folder(self) -> pulumi.Input[str]: + """ + The folder where Cloud Security Command Center Big Query Export + Config lives in. + """ + return pulumi.get(self, "folder") + + @folder.setter + def folder(self, value: pulumi.Input[str]): + pulumi.set(self, "folder", value) + + @property + @pulumi.getter + def dataset(self) -> Optional[pulumi.Input[str]]: + """ + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + """ + return pulumi.get(self, "dataset") + + @dataset.setter + def dataset(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "dataset", value) + + @property + @pulumi.getter + def description(self) -> Optional[pulumi.Input[str]]: + """ + The description of the notification config (max of 1024 characters). + """ + return pulumi.get(self, "description") + + @description.setter + def description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "description", value) + + @property + @pulumi.getter + def filter(self) -> Optional[pulumi.Input[str]]: + """ + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. 
+ See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + """ + return pulumi.get(self, "filter") + + @filter.setter + def filter(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "filter", value) + + @property + @pulumi.getter + def location(self) -> Optional[pulumi.Input[str]]: + """ + The BigQuery export configuration is stored in this location. If not provided, Use global as default. + """ + return pulumi.get(self, "location") + + @location.setter + def location(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "location", value) + + +@pulumi.input_type +class _V2FolderSccBigQueryExportState: + def __init__(__self__, *, + big_query_export_id: Optional[pulumi.Input[str]] = None, + create_time: Optional[pulumi.Input[str]] = None, + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + folder: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None, + most_recent_editor: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + principal: Optional[pulumi.Input[str]] = None, + update_time: Optional[pulumi.Input[str]] = None): + """ + Input properties used for looking up and filtering V2FolderSccBigQueryExport resources. + :param pulumi.Input[str] big_query_export_id: This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + + + - - - + :param pulumi.Input[str] create_time: The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + :param pulumi.Input[str] dataset: The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). + :param pulumi.Input[str] filter: Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + :param pulumi.Input[str] folder: The folder where Cloud Security Command Center Big Query Export + Config lives in. + :param pulumi.Input[str] location: The BigQuery export configuration is stored in this location. 
If not provided, Use global as default. + :param pulumi.Input[str] most_recent_editor: Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + :param pulumi.Input[str] name: The resource name of this export, in the format + `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + :param pulumi.Input[str] principal: The service account that needs permission to create table and upload data to the BigQuery dataset. + :param pulumi.Input[str] update_time: The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + """ + if big_query_export_id is not None: + pulumi.set(__self__, "big_query_export_id", big_query_export_id) + if create_time is not None: + pulumi.set(__self__, "create_time", create_time) + if dataset is not None: + pulumi.set(__self__, "dataset", dataset) + if description is not None: + pulumi.set(__self__, "description", description) + if filter is not None: + pulumi.set(__self__, "filter", filter) + if folder is not None: + pulumi.set(__self__, "folder", folder) + if location is not None: + pulumi.set(__self__, "location", location) + if most_recent_editor is not None: + pulumi.set(__self__, "most_recent_editor", most_recent_editor) + if name is not None: + pulumi.set(__self__, "name", name) + if principal is not None: + pulumi.set(__self__, "principal", principal) + if update_time is not None: + pulumi.set(__self__, "update_time", update_time) + + @property + @pulumi.getter(name="bigQueryExportId") + def big_query_export_id(self) -> Optional[pulumi.Input[str]]: + """ + This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + + + - - - + """ + return pulumi.get(self, "big_query_export_id") + + @big_query_export_id.setter + def big_query_export_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "big_query_export_id", value) + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> Optional[pulumi.Input[str]]: + """ + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + """ + return pulumi.get(self, "create_time") + + @create_time.setter + def create_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "create_time", value) + + @property + @pulumi.getter + def dataset(self) -> Optional[pulumi.Input[str]]: + """ + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). 
+ """ + return pulumi.get(self, "dataset") + + @dataset.setter + def dataset(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "dataset", value) + + @property + @pulumi.getter + def description(self) -> Optional[pulumi.Input[str]]: + """ + The description of the notification config (max of 1024 characters). + """ + return pulumi.get(self, "description") + + @description.setter + def description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "description", value) + + @property + @pulumi.getter + def filter(self) -> Optional[pulumi.Input[str]]: + """ + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + """ + return pulumi.get(self, "filter") + + @filter.setter + def filter(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "filter", value) + + @property + @pulumi.getter + def folder(self) -> Optional[pulumi.Input[str]]: + """ + The folder where Cloud Security Command Center Big Query Export + Config lives in. + """ + return pulumi.get(self, "folder") + + @folder.setter + def folder(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "folder", value) + + @property + @pulumi.getter + def location(self) -> Optional[pulumi.Input[str]]: + """ + The BigQuery export configuration is stored in this location. If not provided, Use global as default. + """ + return pulumi.get(self, "location") + + @location.setter + def location(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "location", value) + + @property + @pulumi.getter(name="mostRecentEditor") + def most_recent_editor(self) -> Optional[pulumi.Input[str]]: + """ + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + """ + return pulumi.get(self, "most_recent_editor") + + @most_recent_editor.setter + def most_recent_editor(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "most_recent_editor", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + The resource name of this export, in the format + `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter + def principal(self) -> Optional[pulumi.Input[str]]: + """ + The service account that needs permission to create table and upload data to the BigQuery dataset. 
+ """ + return pulumi.get(self, "principal") + + @principal.setter + def principal(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "principal", value) + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> Optional[pulumi.Input[str]]: + """ + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + """ + return pulumi.get(self, "update_time") + + @update_time.setter + def update_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "update_time", value) + + +class V2FolderSccBigQueryExport(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + big_query_export_id: Optional[pulumi.Input[str]] = None, + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + folder: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None, + __props__=None): + """ + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + > **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. + + To get more information about FolderSccBigQueryExport, see: + + * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports) + * How-to Guides + * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) + + ## Example Usage + + ### Scc V2 Folder Big Query Export Config Basic + + ```python + import pulumi + import pulumi_gcp as gcp + + folder = gcp.organizations.Folder("folder", + parent="organizations/123456789", + display_name="folder-name", + deletion_protection=False) + default = gcp.bigquery.Dataset("default", + dataset_id="my_dataset_id", + friendly_name="test", + description="This is a test description", + location="US", + default_table_expiration_ms=3600000, + default_partition_expiration_ms=None, + labels={ + "env": "default", + }) + custom_big_query_export_config = gcp.securitycenter.V2FolderSccBigQueryExport("custom_big_query_export_config", + big_query_export_id="my-export", + folder=folder.folder_id, + dataset=default.id, + location="global", + description="Cloud Security Command Center Findings Big Query Export Config", + filter="state=\\"ACTIVE\\" AND NOT mute=\\"MUTED\\"") + ``` + + ## Import + + FolderSccBigQueryExport can be imported using any of these accepted formats: + + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` + + * `{{folder}}/{{location}}/{{big_query_export_id}}` + + When using the `pulumi import` command, FolderSccBigQueryExport can be imported using one of the formats above. 
For example: + + ```sh + $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} + ``` + + ```sh + $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default {{folder}}/{{location}}/{{big_query_export_id}} + ``` + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] big_query_export_id: This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + + + - - - + :param pulumi.Input[str] dataset: The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). + :param pulumi.Input[str] filter: Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + :param pulumi.Input[str] folder: The folder where Cloud Security Command Center Big Query Export + Config lives in. + :param pulumi.Input[str] location: The BigQuery export configuration is stored in this location. If not provided, Use global as default. + """ + ... + @overload + def __init__(__self__, + resource_name: str, + args: V2FolderSccBigQueryExportArgs, + opts: Optional[pulumi.ResourceOptions] = None): + """ + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + > **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. 
+ + To get more information about FolderSccBigQueryExport, see: + + * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/folders.locations.bigQueryExports) + * How-to Guides + * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) + + ## Example Usage + + ### Scc V2 Folder Big Query Export Config Basic + + ```python + import pulumi + import pulumi_gcp as gcp + + folder = gcp.organizations.Folder("folder", + parent="organizations/123456789", + display_name="folder-name", + deletion_protection=False) + default = gcp.bigquery.Dataset("default", + dataset_id="my_dataset_id", + friendly_name="test", + description="This is a test description", + location="US", + default_table_expiration_ms=3600000, + default_partition_expiration_ms=None, + labels={ + "env": "default", + }) + custom_big_query_export_config = gcp.securitycenter.V2FolderSccBigQueryExport("custom_big_query_export_config", + big_query_export_id="my-export", + folder=folder.folder_id, + dataset=default.id, + location="global", + description="Cloud Security Command Center Findings Big Query Export Config", + filter="state=\\"ACTIVE\\" AND NOT mute=\\"MUTED\\"") + ``` + + ## Import + + FolderSccBigQueryExport can be imported using any of these accepted formats: + + * `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` + + * `{{folder}}/{{location}}/{{big_query_export_id}}` + + When using the `pulumi import` command, FolderSccBigQueryExport can be imported using one of the formats above. For example: + + ```sh + $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} + ``` + + ```sh + $ pulumi import gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport default {{folder}}/{{location}}/{{big_query_export_id}} + ``` + + :param str resource_name: The name of the resource. + :param V2FolderSccBigQueryExportArgs args: The arguments to use to populate this resource's properties. + :param pulumi.ResourceOptions opts: Options for the resource. + """ + ... 
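> The generated docstrings above note several times that the export's `principal` service account "needs permission to create table and upload data to the BigQuery dataset", but the generated example stops at creating the export itself. A minimal sketch of one way to complete the picture with the existing `gcp.bigquery.DatasetIamMember` resource follows; the resource names, the `roles/bigquery.dataEditor` role choice, and the IAM wiring are illustrative assumptions, not part of the generated SDK or its docs.

```python
import pulumi
import pulumi_gcp as gcp

# Folder and dataset as in the generated example above (names are placeholders).
folder = gcp.organizations.Folder("folder",
    parent="organizations/123456789",
    display_name="folder-name",
    deletion_protection=False)
default = gcp.bigquery.Dataset("default",
    dataset_id="my_dataset_id",
    location="US")

# Folder-level SCC BigQuery export writing findings into the dataset above.
export = gcp.securitycenter.V2FolderSccBigQueryExport("custom_big_query_export_config",
    big_query_export_id="my-export",
    folder=folder.folder_id,
    dataset=default.id,
    location="global",
    filter="state=\"ACTIVE\" AND NOT mute=\"MUTED\"")

# Hypothetical follow-up (not generated content): let the export's service-account
# principal write to the dataset. The role is an assumption; adjust to your policy.
grant = gcp.bigquery.DatasetIamMember("scc_export_writer",
    dataset_id=default.dataset_id,
    role="roles/bigquery.dataEditor",
    member=pulumi.Output.concat("serviceAccount:", export.principal))
```

> Because `principal` is an output populated by the server, referencing it through `pulumi.Output.concat` keeps the grant correctly ordered after the export is created.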
+ def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(V2FolderSccBigQueryExportArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + big_query_export_id: Optional[pulumi.Input[str]] = None, + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + folder: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = V2FolderSccBigQueryExportArgs.__new__(V2FolderSccBigQueryExportArgs) + + if big_query_export_id is None and not opts.urn: + raise TypeError("Missing required property 'big_query_export_id'") + __props__.__dict__["big_query_export_id"] = big_query_export_id + __props__.__dict__["dataset"] = dataset + __props__.__dict__["description"] = description + __props__.__dict__["filter"] = filter + if folder is None and not opts.urn: + raise TypeError("Missing required property 'folder'") + __props__.__dict__["folder"] = folder + __props__.__dict__["location"] = location + __props__.__dict__["create_time"] = None + __props__.__dict__["most_recent_editor"] = None + __props__.__dict__["name"] = None + __props__.__dict__["principal"] = None + __props__.__dict__["update_time"] = None + super(V2FolderSccBigQueryExport, __self__).__init__( + 'gcp:securitycenter/v2FolderSccBigQueryExport:V2FolderSccBigQueryExport', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None, + big_query_export_id: Optional[pulumi.Input[str]] = None, + create_time: Optional[pulumi.Input[str]] = None, + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + folder: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None, + most_recent_editor: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + principal: Optional[pulumi.Input[str]] = None, + update_time: Optional[pulumi.Input[str]] = None) -> 'V2FolderSccBigQueryExport': + """ + Get an existing V2FolderSccBigQueryExport resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] big_query_export_id: This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. 
+ + + - - - + :param pulumi.Input[str] create_time: The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + :param pulumi.Input[str] dataset: The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). + :param pulumi.Input[str] filter: Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + :param pulumi.Input[str] folder: The folder where Cloud Security Command Center Big Query Export + Config lives in. + :param pulumi.Input[str] location: The BigQuery export configuration is stored in this location. If not provided, Use global as default. + :param pulumi.Input[str] most_recent_editor: Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + :param pulumi.Input[str] name: The resource name of this export, in the format + `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + :param pulumi.Input[str] principal: The service account that needs permission to create table and upload data to the BigQuery dataset. + :param pulumi.Input[str] update_time: The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
+ """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = _V2FolderSccBigQueryExportState.__new__(_V2FolderSccBigQueryExportState) + + __props__.__dict__["big_query_export_id"] = big_query_export_id + __props__.__dict__["create_time"] = create_time + __props__.__dict__["dataset"] = dataset + __props__.__dict__["description"] = description + __props__.__dict__["filter"] = filter + __props__.__dict__["folder"] = folder + __props__.__dict__["location"] = location + __props__.__dict__["most_recent_editor"] = most_recent_editor + __props__.__dict__["name"] = name + __props__.__dict__["principal"] = principal + __props__.__dict__["update_time"] = update_time + return V2FolderSccBigQueryExport(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter(name="bigQueryExportId") + def big_query_export_id(self) -> pulumi.Output[str]: + """ + This must be unique within the organization. It must consist of only lowercase letters, + numbers, and hyphens, must start with a letter, must end with either a letter or a number, + and must be 63 characters or less. + + + - - - + """ + return pulumi.get(self, "big_query_export_id") + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> pulumi.Output[str]: + """ + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + """ + return pulumi.get(self, "create_time") + + @property + @pulumi.getter + def dataset(self) -> pulumi.Output[Optional[str]]: + """ + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + """ + return pulumi.get(self, "dataset") + + @property + @pulumi.getter + def description(self) -> pulumi.Output[Optional[str]]: + """ + The description of the notification config (max of 1024 characters). + """ + return pulumi.get(self, "description") + + @property + @pulumi.getter + def filter(self) -> pulumi.Output[Optional[str]]: + """ + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + """ + return pulumi.get(self, "filter") + + @property + @pulumi.getter + def folder(self) -> pulumi.Output[str]: + """ + The folder where Cloud Security Command Center Big Query Export + Config lives in. 
+ """ + return pulumi.get(self, "folder") + + @property + @pulumi.getter + def location(self) -> pulumi.Output[Optional[str]]: + """ + The BigQuery export configuration is stored in this location. If not provided, Use global as default. + """ + return pulumi.get(self, "location") + + @property + @pulumi.getter(name="mostRecentEditor") + def most_recent_editor(self) -> pulumi.Output[str]: + """ + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + """ + return pulumi.get(self, "most_recent_editor") + + @property + @pulumi.getter + def name(self) -> pulumi.Output[str]: + """ + The resource name of this export, in the format + `folders/{{folder}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + """ + return pulumi.get(self, "name") + + @property + @pulumi.getter + def principal(self) -> pulumi.Output[str]: + """ + The service account that needs permission to create table and upload data to the BigQuery dataset. + """ + return pulumi.get(self, "principal") + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> pulumi.Output[str]: + """ + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + """ + return pulumi.get(self, "update_time") + diff --git a/sdk/python/pulumi_gcp/securitycenter/v2_organization_scc_big_query_exports.py b/sdk/python/pulumi_gcp/securitycenter/v2_organization_scc_big_query_exports.py index 49384e3558..bc298857fe 100644 --- a/sdk/python/pulumi_gcp/securitycenter/v2_organization_scc_big_query_exports.py +++ b/sdk/python/pulumi_gcp/securitycenter/v2_organization_scc_big_query_exports.py @@ -468,7 +468,7 @@ def __init__(__self__, import pulumi_gcp as gcp default = gcp.bigquery.Dataset("default", - dataset_id="my_dataset_id", + dataset_id="", friendly_name="test", description="This is a test description", location="US", @@ -481,7 +481,7 @@ def __init__(__self__, name="my-export", big_query_export_id="my-export", organization="123456789", - dataset="my-dataset", + dataset=default.id, location="global", description="Cloud Security Command Center Findings Big Query Export Config", filter="state=\\"ACTIVE\\" AND NOT mute=\\"MUTED\\"") @@ -570,7 +570,7 @@ def __init__(__self__, import pulumi_gcp as gcp default = gcp.bigquery.Dataset("default", - dataset_id="my_dataset_id", + dataset_id="", friendly_name="test", description="This is a test description", location="US", @@ -583,7 +583,7 @@ def __init__(__self__, name="my-export", big_query_export_id="my-export", organization="123456789", - dataset="my-dataset", + dataset=default.id, location="global", description="Cloud Security Command Center Findings Big Query Export Config", filter="state=\\"ACTIVE\\" AND NOT mute=\\"MUTED\\"") diff --git a/sdk/python/pulumi_gcp/securitycenter/v2_project_scc_big_query_export.py b/sdk/python/pulumi_gcp/securitycenter/v2_project_scc_big_query_export.py new file mode 100644 index 0000000000..48c0b1f9b9 --- /dev/null +++ b/sdk/python/pulumi_gcp/securitycenter/v2_project_scc_big_query_export.py @@ -0,0 +1,796 @@ +# coding=utf-8 +# *** WARNING: this file was generated by the Pulumi Terraform 
Bridge (tfgen) Tool. *** +# *** Do not edit by hand unless you're certain you know what you are doing! *** + +import copy +import warnings +import sys +import pulumi +import pulumi.runtime +from typing import Any, Mapping, Optional, Sequence, Union, overload +if sys.version_info >= (3, 11): + from typing import NotRequired, TypedDict, TypeAlias +else: + from typing_extensions import NotRequired, TypedDict, TypeAlias +from .. import _utilities + +__all__ = ['V2ProjectSccBigQueryExportArgs', 'V2ProjectSccBigQueryExport'] + +@pulumi.input_type +class V2ProjectSccBigQueryExportArgs: + def __init__(__self__, *, + big_query_export_id: pulumi.Input[str], + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[str]] = None): + """ + The set of arguments for constructing a V2ProjectSccBigQueryExport resource. + :param pulumi.Input[str] big_query_export_id: This must be unique within the organization. + + + - - - + :param pulumi.Input[str] dataset: The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). + :param pulumi.Input[str] filter: Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + :param pulumi.Input[str] location: location Id is provided by organization. If not provided, Use global as default. + :param pulumi.Input[str] project: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + """ + pulumi.set(__self__, "big_query_export_id", big_query_export_id) + if dataset is not None: + pulumi.set(__self__, "dataset", dataset) + if description is not None: + pulumi.set(__self__, "description", description) + if filter is not None: + pulumi.set(__self__, "filter", filter) + if location is not None: + pulumi.set(__self__, "location", location) + if project is not None: + pulumi.set(__self__, "project", project) + + @property + @pulumi.getter(name="bigQueryExportId") + def big_query_export_id(self) -> pulumi.Input[str]: + """ + This must be unique within the organization. 
+ + + - - - + """ + return pulumi.get(self, "big_query_export_id") + + @big_query_export_id.setter + def big_query_export_id(self, value: pulumi.Input[str]): + pulumi.set(self, "big_query_export_id", value) + + @property + @pulumi.getter + def dataset(self) -> Optional[pulumi.Input[str]]: + """ + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + """ + return pulumi.get(self, "dataset") + + @dataset.setter + def dataset(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "dataset", value) + + @property + @pulumi.getter + def description(self) -> Optional[pulumi.Input[str]]: + """ + The description of the notification config (max of 1024 characters). + """ + return pulumi.get(self, "description") + + @description.setter + def description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "description", value) + + @property + @pulumi.getter + def filter(self) -> Optional[pulumi.Input[str]]: + """ + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + """ + return pulumi.get(self, "filter") + + @filter.setter + def filter(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "filter", value) + + @property + @pulumi.getter + def location(self) -> Optional[pulumi.Input[str]]: + """ + location Id is provided by organization. If not provided, Use global as default. + """ + return pulumi.get(self, "location") + + @location.setter + def location(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "location", value) + + @property + @pulumi.getter + def project(self) -> Optional[pulumi.Input[str]]: + """ + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. 
+ """ + return pulumi.get(self, "project") + + @project.setter + def project(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "project", value) + + +@pulumi.input_type +class _V2ProjectSccBigQueryExportState: + def __init__(__self__, *, + big_query_export_id: Optional[pulumi.Input[str]] = None, + create_time: Optional[pulumi.Input[str]] = None, + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None, + most_recent_editor: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + principal: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[str]] = None, + update_time: Optional[pulumi.Input[str]] = None): + """ + Input properties used for looking up and filtering V2ProjectSccBigQueryExport resources. + :param pulumi.Input[str] big_query_export_id: This must be unique within the organization. + + + - - - + :param pulumi.Input[str] create_time: The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + :param pulumi.Input[str] dataset: The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). + :param pulumi.Input[str] filter: Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + :param pulumi.Input[str] location: location Id is provided by organization. If not provided, Use global as default. + :param pulumi.Input[str] most_recent_editor: Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + :param pulumi.Input[str] name: The resource name of this export, in the format + `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + :param pulumi.Input[str] principal: The service account that needs permission to create table and upload data to the BigQuery dataset. + :param pulumi.Input[str] project: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. 
+ :param pulumi.Input[str] update_time: The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + """ + if big_query_export_id is not None: + pulumi.set(__self__, "big_query_export_id", big_query_export_id) + if create_time is not None: + pulumi.set(__self__, "create_time", create_time) + if dataset is not None: + pulumi.set(__self__, "dataset", dataset) + if description is not None: + pulumi.set(__self__, "description", description) + if filter is not None: + pulumi.set(__self__, "filter", filter) + if location is not None: + pulumi.set(__self__, "location", location) + if most_recent_editor is not None: + pulumi.set(__self__, "most_recent_editor", most_recent_editor) + if name is not None: + pulumi.set(__self__, "name", name) + if principal is not None: + pulumi.set(__self__, "principal", principal) + if project is not None: + pulumi.set(__self__, "project", project) + if update_time is not None: + pulumi.set(__self__, "update_time", update_time) + + @property + @pulumi.getter(name="bigQueryExportId") + def big_query_export_id(self) -> Optional[pulumi.Input[str]]: + """ + This must be unique within the organization. + + + - - - + """ + return pulumi.get(self, "big_query_export_id") + + @big_query_export_id.setter + def big_query_export_id(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "big_query_export_id", value) + + @property + @pulumi.getter(name="createTime") + def create_time(self) -> Optional[pulumi.Input[str]]: + """ + The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + """ + return pulumi.get(self, "create_time") + + @create_time.setter + def create_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "create_time", value) + + @property + @pulumi.getter + def dataset(self) -> Optional[pulumi.Input[str]]: + """ + The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + """ + return pulumi.get(self, "dataset") + + @dataset.setter + def dataset(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "dataset", value) + + @property + @pulumi.getter + def description(self) -> Optional[pulumi.Input[str]]: + """ + The description of the notification config (max of 1024 characters). + """ + return pulumi.get(self, "description") + + @description.setter + def description(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "description", value) + + @property + @pulumi.getter + def filter(self) -> Optional[pulumi.Input[str]]: + """ + Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. 
+ The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + """ + return pulumi.get(self, "filter") + + @filter.setter + def filter(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "filter", value) + + @property + @pulumi.getter + def location(self) -> Optional[pulumi.Input[str]]: + """ + location Id is provided by organization. If not provided, Use global as default. + """ + return pulumi.get(self, "location") + + @location.setter + def location(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "location", value) + + @property + @pulumi.getter(name="mostRecentEditor") + def most_recent_editor(self) -> Optional[pulumi.Input[str]]: + """ + Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + """ + return pulumi.get(self, "most_recent_editor") + + @most_recent_editor.setter + def most_recent_editor(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "most_recent_editor", value) + + @property + @pulumi.getter + def name(self) -> Optional[pulumi.Input[str]]: + """ + The resource name of this export, in the format + `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + """ + return pulumi.get(self, "name") + + @name.setter + def name(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "name", value) + + @property + @pulumi.getter + def principal(self) -> Optional[pulumi.Input[str]]: + """ + The service account that needs permission to create table and upload data to the BigQuery dataset. + """ + return pulumi.get(self, "principal") + + @principal.setter + def principal(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "principal", value) + + @property + @pulumi.getter + def project(self) -> Optional[pulumi.Input[str]]: + """ + The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + """ + return pulumi.get(self, "project") + + @project.setter + def project(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "project", value) + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> Optional[pulumi.Input[str]]: + """ + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
+ """ + return pulumi.get(self, "update_time") + + @update_time.setter + def update_time(self, value: Optional[pulumi.Input[str]]): + pulumi.set(self, "update_time", value) + + +class V2ProjectSccBigQueryExport(pulumi.CustomResource): + @overload + def __init__(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + big_query_export_id: Optional[pulumi.Input[str]] = None, + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[str]] = None, + __props__=None): + """ + A Cloud Security Command Center (Cloud SCC) Big Query Export Config. + It represents exporting Security Command Center data, including assets, findings, and security marks + using gcloud scc bqexports + > **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. + + To get more information about ProjectSccBigQueryExport, see: + + * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.bigQueryExports) + * How-to Guides + * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query) + + ## Example Usage + + ## Import + + ProjectSccBigQueryExport can be imported using any of these accepted formats: + + * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}` + + * `{{project}}/{{location}}/{{big_query_export_id}}` + + * `{{location}}/{{big_query_export_id}}` + + When using the `pulumi import` command, ProjectSccBigQueryExport can be imported using one of the formats above. For example: + + ```sh + $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}} + ``` + + ```sh + $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{project}}/{{location}}/{{big_query_export_id}} + ``` + + ```sh + $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{location}}/{{big_query_export_id}} + ``` + + :param str resource_name: The name of the resource. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] big_query_export_id: This must be unique within the organization. + + + - - - + :param pulumi.Input[str] dataset: The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). + :param pulumi.Input[str] filter: Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. 
+ * >, <, >=, <= for integer values.
+ * :, meaning substring matching, for strings.
+ The supported value types are:
+ * string literals in quotes.
+ * integer literals without quotes.
+ * boolean literals true and false without quotes.
+ See
+ [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)
+ for information on how to write a filter.
+ :param pulumi.Input[str] location: The location ID is provided by the organization. If not provided, `global` is used as the default.
+ :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
+ If it is not provided, the provider project is used.
+ """
+ ...
+ @overload
+ def __init__(__self__,
+ resource_name: str,
+ args: V2ProjectSccBigQueryExportArgs,
+ opts: Optional[pulumi.ResourceOptions] = None):
+ """
+ A Cloud Security Command Center (Cloud SCC) BigQuery Export Config.
+ It represents exporting Security Command Center data, including assets, findings, and security marks,
+ using `gcloud scc bqexports`.
+
+ > **Note:** In order to use Cloud SCC resources, your organization must be enrolled
+ in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center).
+ Without doing so, you may run into errors during resource creation.
+
+ To get more information about ProjectSccBigQueryExport, see:
+
+ * [API documentation](https://cloud.google.com/security-command-center/docs/reference/rest/v2/projects.locations.bigQueryExports)
+ * How-to Guides
+ * [Official Documentation](https://cloud.google.com/security-command-center/docs/how-to-analyze-findings-in-big-query)
+
+ ## Example Usage
+
+ ## Import
+
+ ProjectSccBigQueryExport can be imported using any of these accepted formats:
+
+ * `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`
+
+ * `{{project}}/{{location}}/{{big_query_export_id}}`
+
+ * `{{location}}/{{big_query_export_id}}`
+
+ When using the `pulumi import` command, ProjectSccBigQueryExport can be imported using one of the formats above. For example:
+
+ ```sh
+ $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}
+ ```
+
+ ```sh
+ $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{project}}/{{location}}/{{big_query_export_id}}
+ ```
+
+ ```sh
+ $ pulumi import gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport default {{location}}/{{big_query_export_id}}
+ ```
+
+ :param str resource_name: The name of the resource.
+ :param V2ProjectSccBigQueryExportArgs args: The arguments to use to populate this resource's properties.
+ :param pulumi.ResourceOptions opts: Options for the resource.
+ """
+ ... 
+ def __init__(__self__, resource_name: str, *args, **kwargs): + resource_args, opts = _utilities.get_resource_args_opts(V2ProjectSccBigQueryExportArgs, pulumi.ResourceOptions, *args, **kwargs) + if resource_args is not None: + __self__._internal_init(resource_name, opts, **resource_args.__dict__) + else: + __self__._internal_init(resource_name, *args, **kwargs) + + def _internal_init(__self__, + resource_name: str, + opts: Optional[pulumi.ResourceOptions] = None, + big_query_export_id: Optional[pulumi.Input[str]] = None, + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[str]] = None, + __props__=None): + opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) + if not isinstance(opts, pulumi.ResourceOptions): + raise TypeError('Expected resource options to be a ResourceOptions instance') + if opts.id is None: + if __props__ is not None: + raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') + __props__ = V2ProjectSccBigQueryExportArgs.__new__(V2ProjectSccBigQueryExportArgs) + + if big_query_export_id is None and not opts.urn: + raise TypeError("Missing required property 'big_query_export_id'") + __props__.__dict__["big_query_export_id"] = big_query_export_id + __props__.__dict__["dataset"] = dataset + __props__.__dict__["description"] = description + __props__.__dict__["filter"] = filter + __props__.__dict__["location"] = location + __props__.__dict__["project"] = project + __props__.__dict__["create_time"] = None + __props__.__dict__["most_recent_editor"] = None + __props__.__dict__["name"] = None + __props__.__dict__["principal"] = None + __props__.__dict__["update_time"] = None + super(V2ProjectSccBigQueryExport, __self__).__init__( + 'gcp:securitycenter/v2ProjectSccBigQueryExport:V2ProjectSccBigQueryExport', + resource_name, + __props__, + opts) + + @staticmethod + def get(resource_name: str, + id: pulumi.Input[str], + opts: Optional[pulumi.ResourceOptions] = None, + big_query_export_id: Optional[pulumi.Input[str]] = None, + create_time: Optional[pulumi.Input[str]] = None, + dataset: Optional[pulumi.Input[str]] = None, + description: Optional[pulumi.Input[str]] = None, + filter: Optional[pulumi.Input[str]] = None, + location: Optional[pulumi.Input[str]] = None, + most_recent_editor: Optional[pulumi.Input[str]] = None, + name: Optional[pulumi.Input[str]] = None, + principal: Optional[pulumi.Input[str]] = None, + project: Optional[pulumi.Input[str]] = None, + update_time: Optional[pulumi.Input[str]] = None) -> 'V2ProjectSccBigQueryExport': + """ + Get an existing V2ProjectSccBigQueryExport resource's state with the given name, id, and optional extra + properties used to qualify the lookup. + + :param str resource_name: The unique name of the resulting resource. + :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. + :param pulumi.ResourceOptions opts: Options for the resource. + :param pulumi.Input[str] big_query_export_id: This must be unique within the organization. + + + - - - + :param pulumi.Input[str] create_time: The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export on creation. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+ Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + :param pulumi.Input[str] dataset: The dataset to write findings' updates to. + Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]". + BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + :param pulumi.Input[str] description: The description of the notification config (max of 1024 characters). + :param pulumi.Input[str] filter: Expression that defines the filter to apply across create/update + events of findings. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + The supported operators are: + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + The supported value types are: + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. + :param pulumi.Input[str] location: location Id is provided by organization. If not provided, Use global as default. + :param pulumi.Input[str] most_recent_editor: Email address of the user who last edited the BigQuery export. + This field is set by the server and will be ignored if provided on export creation or update. + :param pulumi.Input[str] name: The resource name of this export, in the format + `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`. + This field is provided in responses, and is ignored when provided in create requests. + :param pulumi.Input[str] principal: The service account that needs permission to create table and upload data to the BigQuery dataset. + :param pulumi.Input[str] project: The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + :param pulumi.Input[str] update_time: The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + """ + opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) + + __props__ = _V2ProjectSccBigQueryExportState.__new__(_V2ProjectSccBigQueryExportState) + + __props__.__dict__["big_query_export_id"] = big_query_export_id + __props__.__dict__["create_time"] = create_time + __props__.__dict__["dataset"] = dataset + __props__.__dict__["description"] = description + __props__.__dict__["filter"] = filter + __props__.__dict__["location"] = location + __props__.__dict__["most_recent_editor"] = most_recent_editor + __props__.__dict__["name"] = name + __props__.__dict__["principal"] = principal + __props__.__dict__["project"] = project + __props__.__dict__["update_time"] = update_time + return V2ProjectSccBigQueryExport(resource_name, opts=opts, __props__=__props__) + + @property + @pulumi.getter(name="bigQueryExportId") + def big_query_export_id(self) -> pulumi.Output[str]: + """ + This must be unique within the organization. 
+
+
+ - - -
+ """
+ return pulumi.get(self, "big_query_export_id")
+
+ @property
+ @pulumi.getter(name="createTime")
+ def create_time(self) -> pulumi.Output[str]:
+ """
+ The time at which the BigQuery export was created. This field is set by the server and will be ignored if provided on export creation.
+ A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
+ Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
+ """
+ return pulumi.get(self, "create_time")
+
+ @property
+ @pulumi.getter
+ def dataset(self) -> pulumi.Output[Optional[str]]:
+ """
+ The dataset to write findings' updates to.
+ Its format is "projects/[projectId]/datasets/[bigquery_dataset_id]".
+ BigQuery Dataset unique ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
+ """
+ return pulumi.get(self, "dataset")
+
+ @property
+ @pulumi.getter
+ def description(self) -> pulumi.Output[Optional[str]]:
+ """
+ The description of the export (max of 1024 characters).
+ """
+ return pulumi.get(self, "description")
+
+ @property
+ @pulumi.getter
+ def filter(self) -> pulumi.Output[Optional[str]]:
+ """
+ Expression that defines the filter to apply across create/update
+ events of findings. The
+ expression is a list of zero or more restrictions combined via
+ logical operators AND and OR. Parentheses are supported, and OR
+ has higher precedence than AND.
+ Restrictions have the form `<field> <operator> <value>` and may have
+ a `-` character in front of them to indicate negation. The fields
+ map to those defined in the corresponding resource.
+ The supported operators are:
+ * = for all value types.
+ * >, <, >=, <= for integer values.
+ * :, meaning substring matching, for strings.
+ The supported value types are:
+ * string literals in quotes.
+ * integer literals without quotes.
+ * boolean literals true and false without quotes.
+ See
+ [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications)
+ for information on how to write a filter.
+ """
+ return pulumi.get(self, "filter")
+
+ @property
+ @pulumi.getter
+ def location(self) -> pulumi.Output[Optional[str]]:
+ """
+ The location ID is provided by the organization. If not provided, `global` is used as the default.
+ """
+ return pulumi.get(self, "location")
+
+ @property
+ @pulumi.getter(name="mostRecentEditor")
+ def most_recent_editor(self) -> pulumi.Output[str]:
+ """
+ Email address of the user who last edited the BigQuery export.
+ This field is set by the server and will be ignored if provided on export creation or update.
+ """
+ return pulumi.get(self, "most_recent_editor")
+
+ @property
+ @pulumi.getter
+ def name(self) -> pulumi.Output[str]:
+ """
+ The resource name of this export, in the format
+ `projects/{{project}}/locations/{{location}}/bigQueryExports/{{big_query_export_id}}`.
+ This field is provided in responses, and is ignored when provided in create requests.
+ """
+ return pulumi.get(self, "name")
+
+ @property
+ @pulumi.getter
+ def principal(self) -> pulumi.Output[str]:
+ """
+ The service account that needs permission to create table and upload data to the BigQuery dataset.
+ """
+ return pulumi.get(self, "principal")
+
+ @property
+ @pulumi.getter
+ def project(self) -> pulumi.Output[str]:
+ """
+ The ID of the project in which the resource belongs.
+ If it is not provided, the provider project is used. 
+ """ + return pulumi.get(self, "project") + + @property + @pulumi.getter(name="updateTime") + def update_time(self) -> pulumi.Output[str]: + """ + The most recent time at which the BigQuery export was updated. This field is set by the server and will be ignored if provided on export creation or update. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + """ + return pulumi.get(self, "update_time") + diff --git a/upstream b/upstream index 37f2e16c12..5323032817 160000 --- a/upstream +++ b/upstream @@ -1 +1 @@ -Subproject commit 37f2e16c123ab4c6ac92bace8b6424c925783872 +Subproject commit 532303281773dba5779710f38b8f1cd4b65af6db